[Midnightbsd-cvs] src [10117] trunk/sys/dev/drm2: add drm2

laffer1 at midnightbsd.org
Sun May 27 20:14:03 EDT 2018


Revision: 10117
          http://svnweb.midnightbsd.org/src/?rev=10117
Author:   laffer1
Date:     2018-05-27 20:14:02 -0400 (Sun, 27 May 2018)
Log Message:
-----------
add drm2

Modified Paths:
--------------
    trunk/sys/dev/drm2/drm.h
    trunk/sys/dev/drm2/drmP.h
    trunk/sys/dev/drm2/drm_agpsupport.c
    trunk/sys/dev/drm2/drm_atomic.h
    trunk/sys/dev/drm2/drm_auth.c
    trunk/sys/dev/drm2/drm_bufs.c
    trunk/sys/dev/drm2/drm_context.c
    trunk/sys/dev/drm2/drm_crtc.c
    trunk/sys/dev/drm2/drm_crtc.h
    trunk/sys/dev/drm2/drm_crtc_helper.c
    trunk/sys/dev/drm2/drm_crtc_helper.h
    trunk/sys/dev/drm2/drm_dma.c
    trunk/sys/dev/drm2/drm_dp_helper.h
    trunk/sys/dev/drm2/drm_dp_iic_helper.c
    trunk/sys/dev/drm2/drm_drv.c
    trunk/sys/dev/drm2/drm_edid.c
    trunk/sys/dev/drm2/drm_edid.h
    trunk/sys/dev/drm2/drm_edid_modes.h
    trunk/sys/dev/drm2/drm_fb_helper.c
    trunk/sys/dev/drm2/drm_fb_helper.h
    trunk/sys/dev/drm2/drm_fops.c
    trunk/sys/dev/drm2/drm_fourcc.h
    trunk/sys/dev/drm2/drm_gem.c
    trunk/sys/dev/drm2/drm_gem_names.c
    trunk/sys/dev/drm2/drm_gem_names.h
    trunk/sys/dev/drm2/drm_global.c
    trunk/sys/dev/drm2/drm_global.h
    trunk/sys/dev/drm2/drm_hashtab.c
    trunk/sys/dev/drm2/drm_hashtab.h
    trunk/sys/dev/drm2/drm_ioc32.c
    trunk/sys/dev/drm2/drm_ioctl.c
    trunk/sys/dev/drm2/drm_irq.c
    trunk/sys/dev/drm2/drm_linux_list.h
    trunk/sys/dev/drm2/drm_linux_list_sort.c
    trunk/sys/dev/drm2/drm_lock.c
    trunk/sys/dev/drm2/drm_memory.c
    trunk/sys/dev/drm2/drm_mm.c
    trunk/sys/dev/drm2/drm_mm.h
    trunk/sys/dev/drm2/drm_mode.h
    trunk/sys/dev/drm2/drm_modes.c
    trunk/sys/dev/drm2/drm_pci.c
    trunk/sys/dev/drm2/drm_pciids.h
    trunk/sys/dev/drm2/drm_sarea.h
    trunk/sys/dev/drm2/drm_scatter.c
    trunk/sys/dev/drm2/drm_stub.c
    trunk/sys/dev/drm2/drm_sysctl.c
    trunk/sys/dev/drm2/drm_vm.c
    trunk/sys/dev/drm2/i915/i915_debug.c
    trunk/sys/dev/drm2/i915/i915_dma.c
    trunk/sys/dev/drm2/i915/i915_drm.h
    trunk/sys/dev/drm2/i915/i915_drv.c
    trunk/sys/dev/drm2/i915/i915_drv.h
    trunk/sys/dev/drm2/i915/i915_gem.c
    trunk/sys/dev/drm2/i915/i915_gem_evict.c
    trunk/sys/dev/drm2/i915/i915_gem_execbuffer.c
    trunk/sys/dev/drm2/i915/i915_gem_gtt.c
    trunk/sys/dev/drm2/i915/i915_gem_tiling.c
    trunk/sys/dev/drm2/i915/i915_ioc32.c
    trunk/sys/dev/drm2/i915/i915_irq.c
    trunk/sys/dev/drm2/i915/i915_reg.h
    trunk/sys/dev/drm2/i915/i915_suspend.c
    trunk/sys/dev/drm2/i915/intel_bios.c
    trunk/sys/dev/drm2/i915/intel_bios.h
    trunk/sys/dev/drm2/i915/intel_crt.c
    trunk/sys/dev/drm2/i915/intel_display.c
    trunk/sys/dev/drm2/i915/intel_dp.c
    trunk/sys/dev/drm2/i915/intel_drv.h
    trunk/sys/dev/drm2/i915/intel_fb.c
    trunk/sys/dev/drm2/i915/intel_hdmi.c
    trunk/sys/dev/drm2/i915/intel_iic.c
    trunk/sys/dev/drm2/i915/intel_lvds.c
    trunk/sys/dev/drm2/i915/intel_modes.c
    trunk/sys/dev/drm2/i915/intel_opregion.c
    trunk/sys/dev/drm2/i915/intel_overlay.c
    trunk/sys/dev/drm2/i915/intel_panel.c
    trunk/sys/dev/drm2/i915/intel_ringbuffer.c
    trunk/sys/dev/drm2/i915/intel_ringbuffer.h
    trunk/sys/dev/drm2/i915/intel_sdvo.c
    trunk/sys/dev/drm2/i915/intel_sdvo_regs.h
    trunk/sys/dev/drm2/i915/intel_sprite.c
    trunk/sys/dev/drm2/i915/intel_tv.c

Added Paths:
-----------
    trunk/sys/dev/drm2/ati_pcigart.c
    trunk/sys/dev/drm2/drm_buffer.c
    trunk/sys/dev/drm2/drm_buffer.h
    trunk/sys/dev/drm2/drm_dp_helper.c
    trunk/sys/dev/drm2/radeon/
    trunk/sys/dev/drm2/radeon/ObjectID.h
    trunk/sys/dev/drm2/radeon/README
    trunk/sys/dev/drm2/radeon/atom-bits.h
    trunk/sys/dev/drm2/radeon/atom-names.h
    trunk/sys/dev/drm2/radeon/atom-types.h
    trunk/sys/dev/drm2/radeon/atom.c
    trunk/sys/dev/drm2/radeon/atom.h
    trunk/sys/dev/drm2/radeon/atombios.h
    trunk/sys/dev/drm2/radeon/atombios_crtc.c
    trunk/sys/dev/drm2/radeon/atombios_dp.c
    trunk/sys/dev/drm2/radeon/atombios_encoders.c
    trunk/sys/dev/drm2/radeon/atombios_i2c.c
    trunk/sys/dev/drm2/radeon/avivod.h
    trunk/sys/dev/drm2/radeon/cayman_blit_shaders.c
    trunk/sys/dev/drm2/radeon/cayman_blit_shaders.h
    trunk/sys/dev/drm2/radeon/cayman_reg_safe.h
    trunk/sys/dev/drm2/radeon/evergreen.c
    trunk/sys/dev/drm2/radeon/evergreen_blit_kms.c
    trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.c
    trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.h
    trunk/sys/dev/drm2/radeon/evergreen_cs.c
    trunk/sys/dev/drm2/radeon/evergreen_hdmi.c
    trunk/sys/dev/drm2/radeon/evergreen_reg.h
    trunk/sys/dev/drm2/radeon/evergreen_reg_safe.h
    trunk/sys/dev/drm2/radeon/evergreend.h
    trunk/sys/dev/drm2/radeon/ni.c
    trunk/sys/dev/drm2/radeon/ni_reg.h
    trunk/sys/dev/drm2/radeon/nid.h
    trunk/sys/dev/drm2/radeon/r100.c
    trunk/sys/dev/drm2/radeon/r100_reg_safe.h
    trunk/sys/dev/drm2/radeon/r100_track.h
    trunk/sys/dev/drm2/radeon/r100d.h
    trunk/sys/dev/drm2/radeon/r200.c
    trunk/sys/dev/drm2/radeon/r200_reg_safe.h
    trunk/sys/dev/drm2/radeon/r300.c
    trunk/sys/dev/drm2/radeon/r300_cmdbuf.c
    trunk/sys/dev/drm2/radeon/r300_reg.h
    trunk/sys/dev/drm2/radeon/r300_reg_safe.h
    trunk/sys/dev/drm2/radeon/r300d.h
    trunk/sys/dev/drm2/radeon/r420.c
    trunk/sys/dev/drm2/radeon/r420_reg_safe.h
    trunk/sys/dev/drm2/radeon/r420d.h
    trunk/sys/dev/drm2/radeon/r500_reg.h
    trunk/sys/dev/drm2/radeon/r520.c
    trunk/sys/dev/drm2/radeon/r520d.h
    trunk/sys/dev/drm2/radeon/r600.c
    trunk/sys/dev/drm2/radeon/r600_audio.c
    trunk/sys/dev/drm2/radeon/r600_blit.c
    trunk/sys/dev/drm2/radeon/r600_blit_kms.c
    trunk/sys/dev/drm2/radeon/r600_blit_shaders.c
    trunk/sys/dev/drm2/radeon/r600_blit_shaders.h
    trunk/sys/dev/drm2/radeon/r600_cp.c
    trunk/sys/dev/drm2/radeon/r600_cp.h
    trunk/sys/dev/drm2/radeon/r600_cs.c
    trunk/sys/dev/drm2/radeon/r600_cs.h
    trunk/sys/dev/drm2/radeon/r600_hdmi.c
    trunk/sys/dev/drm2/radeon/r600_reg.h
    trunk/sys/dev/drm2/radeon/r600_reg_safe.h
    trunk/sys/dev/drm2/radeon/r600d.h
    trunk/sys/dev/drm2/radeon/radeon.h
    trunk/sys/dev/drm2/radeon/radeon_acpi.c
    trunk/sys/dev/drm2/radeon/radeon_acpi.h
    trunk/sys/dev/drm2/radeon/radeon_agp.c
    trunk/sys/dev/drm2/radeon/radeon_asic.c
    trunk/sys/dev/drm2/radeon/radeon_asic.h
    trunk/sys/dev/drm2/radeon/radeon_atombios.c
    trunk/sys/dev/drm2/radeon/radeon_benchmark.c
    trunk/sys/dev/drm2/radeon/radeon_bios.c
    trunk/sys/dev/drm2/radeon/radeon_blit_common.h
    trunk/sys/dev/drm2/radeon/radeon_clocks.c
    trunk/sys/dev/drm2/radeon/radeon_combios.c
    trunk/sys/dev/drm2/radeon/radeon_connectors.c
    trunk/sys/dev/drm2/radeon/radeon_cp.c
    trunk/sys/dev/drm2/radeon/radeon_cs.c
    trunk/sys/dev/drm2/radeon/radeon_cursor.c
    trunk/sys/dev/drm2/radeon/radeon_device.c
    trunk/sys/dev/drm2/radeon/radeon_display.c
    trunk/sys/dev/drm2/radeon/radeon_drm.h
    trunk/sys/dev/drm2/radeon/radeon_drv.c
    trunk/sys/dev/drm2/radeon/radeon_drv.h
    trunk/sys/dev/drm2/radeon/radeon_encoders.c
    trunk/sys/dev/drm2/radeon/radeon_family.h
    trunk/sys/dev/drm2/radeon/radeon_fb.c
    trunk/sys/dev/drm2/radeon/radeon_fence.c
    trunk/sys/dev/drm2/radeon/radeon_gart.c
    trunk/sys/dev/drm2/radeon/radeon_gem.c
    trunk/sys/dev/drm2/radeon/radeon_gem.h
    trunk/sys/dev/drm2/radeon/radeon_i2c.c
    trunk/sys/dev/drm2/radeon/radeon_ioc32.c
    trunk/sys/dev/drm2/radeon/radeon_irq.c
    trunk/sys/dev/drm2/radeon/radeon_irq_kms.c
    trunk/sys/dev/drm2/radeon/radeon_irq_kms.h
    trunk/sys/dev/drm2/radeon/radeon_kms.c
    trunk/sys/dev/drm2/radeon/radeon_kms.h
    trunk/sys/dev/drm2/radeon/radeon_legacy_crtc.c
    trunk/sys/dev/drm2/radeon/radeon_legacy_encoders.c
    trunk/sys/dev/drm2/radeon/radeon_legacy_tv.c
    trunk/sys/dev/drm2/radeon/radeon_mem.c
    trunk/sys/dev/drm2/radeon/radeon_mode.h
    trunk/sys/dev/drm2/radeon/radeon_object.c
    trunk/sys/dev/drm2/radeon/radeon_object.h
    trunk/sys/dev/drm2/radeon/radeon_pm.c
    trunk/sys/dev/drm2/radeon/radeon_prime.c
    trunk/sys/dev/drm2/radeon/radeon_reg.h
    trunk/sys/dev/drm2/radeon/radeon_ring.c
    trunk/sys/dev/drm2/radeon/radeon_sa.c
    trunk/sys/dev/drm2/radeon/radeon_semaphore.c
    trunk/sys/dev/drm2/radeon/radeon_state.c
    trunk/sys/dev/drm2/radeon/radeon_test.c
    trunk/sys/dev/drm2/radeon/radeon_trace.h
    trunk/sys/dev/drm2/radeon/radeon_trace_points.c
    trunk/sys/dev/drm2/radeon/radeon_ttm.c
    trunk/sys/dev/drm2/radeon/reg_srcs/
    trunk/sys/dev/drm2/radeon/reg_srcs/cayman
    trunk/sys/dev/drm2/radeon/reg_srcs/evergreen
    trunk/sys/dev/drm2/radeon/reg_srcs/r100
    trunk/sys/dev/drm2/radeon/reg_srcs/r200
    trunk/sys/dev/drm2/radeon/reg_srcs/r300
    trunk/sys/dev/drm2/radeon/reg_srcs/r420
    trunk/sys/dev/drm2/radeon/reg_srcs/r600
    trunk/sys/dev/drm2/radeon/reg_srcs/rn50
    trunk/sys/dev/drm2/radeon/reg_srcs/rs600
    trunk/sys/dev/drm2/radeon/reg_srcs/rv515
    trunk/sys/dev/drm2/radeon/rn50_reg_safe.h
    trunk/sys/dev/drm2/radeon/rs100d.h
    trunk/sys/dev/drm2/radeon/rs400.c
    trunk/sys/dev/drm2/radeon/rs400d.h
    trunk/sys/dev/drm2/radeon/rs600.c
    trunk/sys/dev/drm2/radeon/rs600_reg_safe.h
    trunk/sys/dev/drm2/radeon/rs600d.h
    trunk/sys/dev/drm2/radeon/rs690.c
    trunk/sys/dev/drm2/radeon/rs690d.h
    trunk/sys/dev/drm2/radeon/rv200d.h
    trunk/sys/dev/drm2/radeon/rv250d.h
    trunk/sys/dev/drm2/radeon/rv350d.h
    trunk/sys/dev/drm2/radeon/rv515.c
    trunk/sys/dev/drm2/radeon/rv515_reg_safe.h
    trunk/sys/dev/drm2/radeon/rv515d.h
    trunk/sys/dev/drm2/radeon/rv770.c
    trunk/sys/dev/drm2/radeon/rv770d.h
    trunk/sys/dev/drm2/radeon/si.c
    trunk/sys/dev/drm2/radeon/si_blit_shaders.c
    trunk/sys/dev/drm2/radeon/si_blit_shaders.h
    trunk/sys/dev/drm2/radeon/si_reg.h
    trunk/sys/dev/drm2/radeon/sid.h
    trunk/sys/dev/drm2/ttm/
    trunk/sys/dev/drm2/ttm/ttm_agp_backend.c
    trunk/sys/dev/drm2/ttm/ttm_bo.c
    trunk/sys/dev/drm2/ttm/ttm_bo_api.h
    trunk/sys/dev/drm2/ttm/ttm_bo_driver.h
    trunk/sys/dev/drm2/ttm/ttm_bo_manager.c
    trunk/sys/dev/drm2/ttm/ttm_bo_util.c
    trunk/sys/dev/drm2/ttm/ttm_bo_vm.c
    trunk/sys/dev/drm2/ttm/ttm_execbuf_util.c
    trunk/sys/dev/drm2/ttm/ttm_execbuf_util.h
    trunk/sys/dev/drm2/ttm/ttm_lock.c
    trunk/sys/dev/drm2/ttm/ttm_lock.h
    trunk/sys/dev/drm2/ttm/ttm_memory.c
    trunk/sys/dev/drm2/ttm/ttm_memory.h
    trunk/sys/dev/drm2/ttm/ttm_module.h
    trunk/sys/dev/drm2/ttm/ttm_object.c
    trunk/sys/dev/drm2/ttm/ttm_object.h
    trunk/sys/dev/drm2/ttm/ttm_page_alloc.c
    trunk/sys/dev/drm2/ttm/ttm_page_alloc.h
    trunk/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
    trunk/sys/dev/drm2/ttm/ttm_placement.h
    trunk/sys/dev/drm2/ttm/ttm_tt.c

Property Changed:
----------------
    trunk/sys/dev/drm2/drm_global.c
    trunk/sys/dev/drm2/drm_global.h
    trunk/sys/dev/drm2/drm_ioc32.c
    trunk/sys/dev/drm2/i915/i915_ioc32.c

Added: trunk/sys/dev/drm2/ati_pcigart.c
===================================================================
--- trunk/sys/dev/drm2/ati_pcigart.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ati_pcigart.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,213 @@
+/* $MidnightBSD$ */
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth at valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ati_pcigart.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+						PAGE_SIZE, BUS_SPACE_MAXADDR);
+	if (gart_info->table_handle == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	drm_pci_free(dev, gart_info->table_handle);
+	gart_info->table_handle = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+#ifdef __linux__
+	unsigned long pages;
+	int i;
+	int max_pages;
+#endif
+
+	/* we need to support large memory configurations */
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		return 0;
+	}
+
+	if (gart_info->bus_addr) {
+#ifdef __linux__
+
+		max_pages = (gart_info->table_size / sizeof(u32));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+					 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+#endif
+
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+	    gart_info->table_handle) {
+		drm_ati_free_pcigart_table(dev, gart_info);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_local_map *map = &gart_info->mapping;
+	struct drm_sg_mem *entry = dev->sg;
+	void *address = NULL;
+	unsigned long pages;
+	u32 *pci_gart = NULL, page_base, gart_idx;
+	dma_addr_t bus_address = 0;
+	int i, j, ret = 0;
+	int max_ati_pages, max_real_pages;
+
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		goto done;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+#ifdef __linux__
+		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+				  (unsigned long long)gart_info->table_mask);
+			ret = 1;
+			goto done;
+		}
+#endif
+
+		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+		if (ret) {
+			DRM_ERROR("cannot allocate PCI GART page!\n");
+			goto done;
+		}
+
+		pci_gart = gart_info->table_handle->vaddr;
+		address = gart_info->table_handle->vaddr;
+		bus_address = gart_info->table_handle->busaddr;
+	} else {
+		address = gart_info->addr;
+		bus_address = gart_info->bus_addr;
+		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+			  (unsigned long long)bus_address,
+			  (unsigned long)address);
+	}
+
+
+	max_ati_pages = (gart_info->table_size / sizeof(u32));
+	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+	pages = (entry->pages <= max_real_pages)
+	    ? entry->pages : max_real_pages;
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+	} else {
+		memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+	}
+
+	gart_idx = 0;
+	for (i = 0; i < pages; i++) {
+#ifdef __linux__
+		/* we need to support large memory configurations */
+		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+						 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			drm_ati_pcigart_cleanup(dev, gart_info);
+			address = NULL;
+			bus_address = 0;
+			goto done;
+		}
+#endif
+		page_base = (u32) entry->busaddr[i];
+
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			u32 val;
+
+			switch(gart_info->gart_reg_if) {
+			case DRM_ATI_GART_IGP:
+				val = page_base | 0xc;
+				break;
+			case DRM_ATI_GART_PCIE:
+				val = (page_base >> 8) | 0xc;
+				break;
+			default:
+			case DRM_ATI_GART_PCI:
+				val = page_base;
+				break;
+			}
+			if (gart_info->gart_table_location ==
+			    DRM_ATI_GART_MAIN)
+				pci_gart[gart_idx] = cpu_to_le32(val);
+			else
+				DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+			gart_idx++;
+			page_base += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+	wbinvd();
+#else
+	mb();
+#endif
+
+      done:
+	gart_info->addr = address;
+	gart_info->bus_addr = bus_address;
+	return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);


Property changes on: trunk/sys/dev/drm2/ati_pcigart.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
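
For orientation, the heart of the new drm_ati_pcigart_init() above is the
per-page loop that emits one 32-bit table entry per ATI_PCIGART_PAGE_SIZE
(4 KB) chunk of each scatter/gather page, advancing page_base between
entries. A minimal standalone sketch of that entry encoding follows; the
helper name and enum are illustrative, not part of the commit, and the
meaning of the low 0xc flag bits is whatever the hardware defines:

#include <stdint.h>

#define ATI_PCIGART_PAGE_SIZE	4096		/* as in the commit */

/* Illustrative stand-ins for DRM_ATI_GART_{PCI,PCIE,IGP}. */
enum ati_gart_reg_if { GART_PCI, GART_PCIE, GART_IGP };

static uint32_t
ati_gart_entry(uint32_t page_base, enum ati_gart_reg_if reg_if)
{
	switch (reg_if) {
	case GART_IGP:
		/* bus address with the low flag bits set */
		return (page_base | 0xc);
	case GART_PCIE:
		/* PCIE tables store the address shifted right by 8 */
		return ((page_base >> 8) | 0xc);
	case GART_PCI:
	default:
		/* plain PCI: the raw bus address */
		return (page_base);
	}
}

The committed function then stores each value with cpu_to_le32() when the
table lives in main memory (DRM_ATI_GART_MAIN) or DRM_WRITE32() when it
lives in VRAM, and flushes caches (wbinvd() on x86, mb() elsewhere) before
returning.
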
Modified: trunk/sys/dev/drm2/drm.h
===================================================================
--- trunk/sys/dev/drm2/drm.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /**
  * \file drm.h
  * Header for the Direct Rendering Manager
@@ -34,71 +35,33 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/drm2/drm.h 261673 2014-02-09 19:54:39Z dumbbell $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm.h 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/**
- * \mainpage
- *
- * The Direct Rendering Manager (DRM) is a device-independent kernel-level
- * device driver that provides support for the XFree86 Direct Rendering
- * Infrastructure (DRI).
- *
- * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
- * ways:
- *     -# The DRM provides synchronized access to the graphics hardware via
- *        the use of an optimized two-tiered lock.
- *     -# The DRM enforces the DRI security policy for access to the graphics
- *        hardware by only allowing authenticated X11 clients access to
- *        restricted regions of memory.
- *     -# The DRM provides a generic DMA engine, complete with multiple
- *        queues and the ability to detect the need for an OpenGL context
- *        switch.
- *     -# The DRM is extensible via the use of small device-specific modules
- *        that rely extensively on the API exported by the DRM module.
- *
- */
-
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#ifndef __user
-#define __user
-#endif
-#ifndef __iomem
-#define __iomem
-#endif
+#if defined(__linux__)
 
-#ifdef __GNUC__
-# define DEPRECATED  __attribute__ ((deprecated))
-#else
-# define DEPRECATED
-#endif
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
 
-#if defined(__linux__)
-#include <asm/ioctl.h>		/* For _IO* macros */
-#define DRM_IOCTL_NR(n)		_IOC_NR(n)
-#define DRM_IOC_VOID		_IOC_NONE
-#define DRM_IOC_READ		_IOC_READ
-#define DRM_IOC_WRITE		_IOC_WRITE
-#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__MidnightBSD__) || defined(__MidnightBSD_kernel__)
+#else /* One of the BSDs */
+
 #include <sys/ioccom.h>
-#define DRM_IOCTL_NR(n)		((n) & 0xff)
-#define DRM_IOC_VOID		IOC_VOID
-#define DRM_IOC_READ		IOC_OUT
-#define DRM_IOC_WRITE		IOC_IN
-#define DRM_IOC_READWRITE	IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#endif
+#include <sys/types.h>
+typedef int8_t   __s8;
+typedef uint8_t  __u8;
+typedef int16_t  __s16;
+typedef uint16_t __u16;
+typedef int32_t  __s32;
+typedef uint32_t __u32;
+typedef int64_t  __s64;
+typedef uint64_t __u64;
+typedef unsigned long drm_handle_t;
 
-#ifdef __OpenBSD__
-#define DRM_MAJOR       81
+#include <dev/drm2/drm_os_freebsd.h>
 #endif
-#if defined(__linux__) || defined(__NetBSD__)
-#define DRM_MAJOR       226
-#endif
-#define DRM_MAX_MINOR   15
 
 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
@@ -111,20 +74,14 @@
 #define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
-#if defined(__linux__)
-typedef unsigned int drm_handle_t;
-#else
-#include <sys/types.h>
-typedef unsigned long drm_handle_t;	/**< To mapped regions */
-#endif
-typedef unsigned int drm_context_t;	/**< GLXContext handle */
+typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
-typedef unsigned int drm_magic_t;	/**< Magic for authentication */
+typedef unsigned int drm_magic_t;
 
 /**
  * Cliprect.
  *
- * \warning If you change this structure, make sure you change
+ * \warning: If you change this structure, make sure you change
  * XF86DRIClipRectRec in the server as well
  *
  * \note KW: Actually it's illegal to change either for
@@ -138,6 +95,14 @@
 };
 
 /**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+	unsigned int num_rects;
+	struct drm_clip_rect *rects;
+};
+
+/**
  * Texture region,
  */
 struct drm_tex_region {
@@ -160,22 +125,6 @@
 	char padding[60];			/**< Pad to cache line */
 };
 
-/* This is beyond ugly, and only works on GCC.  However, it allows me to use
- * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
- * fix is to use uint32_t instead of size_t, but that fix will break existing
- * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
- * eventually happen, though.  I chose 'unsigned long' to be the fallback type
- * because that works on all the platforms I know about.  Hopefully, the
- * real fix will happen before that bites us.
- */
-
-#ifdef __SIZE_TYPE__
-# define DRM_SIZE_T __SIZE_TYPE__
-#else
-# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
-# define DRM_SIZE_T unsigned long
-#endif
-
 /**
  * DRM_IOCTL_VERSION ioctl argument type.
  *
@@ -185,12 +134,12 @@
 	int version_major;	  /**< Major version */
 	int version_minor;	  /**< Minor version */
 	int version_patchlevel;	  /**< Patch level */
-	DRM_SIZE_T name_len;	  /**< Length of name buffer */
-	char __user *name;		  /**< Name of driver */
-	DRM_SIZE_T date_len;	  /**< Length of date buffer */
-	char __user *date;		  /**< User-space buffer to hold date */
-	DRM_SIZE_T desc_len;	  /**< Length of desc buffer */
-	char __user *desc;		  /**< User-space buffer to hold desc */
+	size_t name_len;	  /**< Length of name buffer */
+	char __user *name;	  /**< Name of driver */
+	size_t date_len;	  /**< Length of date buffer */
+	char __user *date;	  /**< User-space buffer to hold date */
+	size_t desc_len;	  /**< Length of desc buffer */
+	char __user *desc;	  /**< User-space buffer to hold desc */
 };
 
 /**
@@ -199,12 +148,10 @@
  * \sa drmGetBusid() and drmSetBusId().
  */
 struct drm_unique {
-	DRM_SIZE_T unique_len;	  /**< Length of unique */
-	char __user *unique;		  /**< Unique name for driver instantiation */
+	size_t unique_len;	  /**< Length of unique */
+	char __user *unique;	  /**< Unique name for driver instantiation */
 };
 
-#undef DRM_SIZE_T
-
 struct drm_list {
 	int count;		  /**< Length of user-space structures */
 	struct drm_version __user *version;
@@ -239,7 +186,7 @@
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
-	_DRM_GEM = 6		  /**< GEM */
+	_DRM_GEM = 6,		  /**< GEM object */
 };
 
 /**
@@ -388,8 +335,8 @@
 	enum {
 		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
-		_DRM_SG_BUFFER  = 0x04,	/**< Scatter/gather memory buffer */
-		_DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
+		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
+		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
 		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
 	} flags;
 	unsigned long agp_start; /**<
@@ -402,8 +349,8 @@
  * DRM_IOCTL_INFO_BUFS ioctl argument type.
  */
 struct drm_buf_info {
-	int count;		  /**< Number of buffers described in list */
-	struct drm_buf_desc __user *list; /**< List of buffer descriptions */
+	int count;		/**< Entries in list */
+	struct drm_buf_desc __user *list;
 };
 
 /**
@@ -431,11 +378,7 @@
  */
 struct drm_buf_map {
 	int count;		/**< Length of the buffer list */
-#if defined(__cplusplus)
-	void __user *c_virtual;
-#else
 	void __user *virtual;		/**< Mmap'd area in user-virtual */
-#endif
 	struct drm_buf_pub __user *list;	/**< Buffer information */
 };
 
@@ -454,7 +397,7 @@
 	enum drm_dma_flags flags;	  /**< Flags */
 	int request_count;		  /**< Number of buffers requested */
 	int request_size;		  /**< Desired size for buffers */
-	int __user *request_indices;	 /**< Buffer information */
+	int __user *request_indices;	  /**< Buffer information */
 	int __user *request_sizes;
 	int granted_count;		  /**< Number of buffers granted */
 };
@@ -525,12 +468,13 @@
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	/* bits 1-6 are reserved for high crtcs */
 	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
 	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
-	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
+	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
-	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
+	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
 };
 #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
 
@@ -561,7 +505,6 @@
 	struct drm_wait_vblank_reply reply;
 };
 
-
 #define _DRM_PRE_MODESET 1
 #define _DRM_POST_MODESET 2
 
@@ -571,8 +514,8 @@
  * \sa drmModesetCtl().
  */
 struct drm_modeset_ctl {
-	uint32_t crtc;
-	uint32_t cmd;
+	__u32 crtc;
+	__u32 cmd;
 };
 
 /**
@@ -617,16 +560,14 @@
 	int agp_version_major;
 	int agp_version_minor;
 	unsigned long mode;
-	unsigned long aperture_base;   /**< physical address */
-	unsigned long aperture_size;   /**< bytes */
-	unsigned long memory_allowed;  /**< bytes */
+	unsigned long aperture_base;	/* physical address */
+	unsigned long aperture_size;	/* bytes */
+	unsigned long memory_allowed;	/* bytes */
 	unsigned long memory_used;
 
-	/** \name PCI information */
-	/*@{ */
+	/* PCI information */
 	unsigned short id_vendor;
 	unsigned short id_device;
-	/*@} */
 };
 
 /**
@@ -647,384 +588,53 @@
 	int drm_dd_minor;
 };
 
-#define DRM_FENCE_FLAG_EMIT                0x00000001
-#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
-#define DRM_FENCE_FLAG_NO_USER             0x00000010
-
-/* Reserved for driver use */
-#define DRM_FENCE_MASK_DRIVER              0xFF000000
-
-#define DRM_FENCE_TYPE_EXE                 0x00000001
-
-struct drm_fence_arg {
-	unsigned int handle;
-	unsigned int fence_class;
-	unsigned int type;
-	unsigned int flags;
-	unsigned int signaled;
-	unsigned int error;
-	unsigned int sequence;
-	unsigned int pad64;
-	uint64_t expand_pad[2]; /* Future expansion */
-};
-
-/* Buffer permissions, referring to how the GPU uses the buffers.
- * these translate to fence types used for the buffers.
- * Typically a texture buffer is read, A destination buffer is write and
- *  a command (batch-) buffer is exe. Can be or-ed together.
- */
-
-#define DRM_BO_FLAG_READ        (1ULL << 0)
-#define DRM_BO_FLAG_WRITE       (1ULL << 1)
-#define DRM_BO_FLAG_EXE         (1ULL << 2)
-
-/*
- * All of the bits related to access mode
- */
-#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
-/*
- * Status flags. Can be read to determine the actual state of a buffer.
- * Can also be set in the buffer mask before validation.
- */
-
-/*
- * Mask: Never evict this buffer. Not even with force. This type of buffer is only
- * available to root and must be manually removed before buffer manager shutdown
- * or lock.
- * Flags: Acknowledge
- */
-#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
-
-/*
- * Mask: Require that the buffer is placed in mappable memory when validated.
- *       If not set the buffer may or may not be in mappable memory when validated.
- * Flags: If set, the buffer is in mappable memory.
- */
-#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
-
-/* Mask: The buffer should be shareable with other processes.
- * Flags: The buffer is shareable with other processes.
- */
-#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
-
-/* Mask: If set, place the buffer in cache-coherent memory if available.
- *       If clear, never place the buffer in cache coherent memory if validated.
- * Flags: The buffer is currently in cache-coherent memory.
- */
-#define DRM_BO_FLAG_CACHED      (1ULL << 7)
-
-/* Mask: Make sure that every time this buffer is validated,
- *       it ends up on the same location provided that the memory mask is the same.
- *       The buffer will also not be evicted when claiming space for
- *       other buffers. Basically a pinned buffer but it may be thrown out as
- *       part of buffer manager shutdown or locking.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
-
-/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
- * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
- * with unsnooped PTEs instead of snooped, by using chipset-specific cache
- * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
- * as the eviction to local memory (TTM unbind) on map is just a side effect
- * to prevent aggressive cache prefetch from the GPU disturbing the cache
- * management that the DRM is doing.
- *
- * Flags: Acknowledge.
- * Buffers allocated with this flag should not be used for suballocators
- * This type may have issues on CPUs with over-aggressive caching
- * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
- */
-#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
-
-
-/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
-
-/*
- * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
-#define DRM_BO_FLAG_TILE           (1ULL << 15)
-
-/*
- * Memory type flags that can be or'ed together in the mask, but only
- * one appears in flags.
- */
-
-/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
-/* Translation table memory */
-#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
-/* Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
-/* Up to the driver to define. */
-#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
-#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
-#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
-#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
-#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
-/* We can add more of these now with a 64-bit flag type */
-
-/*
- * This is a mask covering all of the memory type flags; easier to just
- * use a single constant than a bunch of | values. It covers
- * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
- */
-#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
-/*
- * This adds all of the CPU-mapping options in with the memory
- * type to label all bits which change how the page gets mapped
- */
-#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
-				 DRM_BO_FLAG_CACHED_MAPPED | \
-				 DRM_BO_FLAG_CACHED | \
-				 DRM_BO_FLAG_MAPPABLE)
-				 
-/* Driver-private flags */
-#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
-
-/*
- * Don't block on validate and map. Instead, return EBUSY.
- */
-#define DRM_BO_HINT_DONT_BLOCK  0x00000002
-/*
- * Don't place this buffer on the unfenced list. This means
- * that the buffer will not end up having a fence associated
- * with it as a result of this operation
- */
-#define DRM_BO_HINT_DONT_FENCE  0x00000004
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_BO_HINT_WAIT_LAZY   0x00000008
-/*
- * The client has compute relocations refering to this buffer using the
- * offset in the presumed_offset field. If that offset ends up matching
- * where this buffer lands, the kernel is free to skip executing those
- * relocations
- */
-#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
-
-#define DRM_BO_INIT_MAGIC 0xfe769812
-#define DRM_BO_INIT_MAJOR 1
-#define DRM_BO_INIT_MINOR 0
-#define DRM_BO_INIT_PATCH 0
-
-
-struct drm_bo_info_req {
-	uint64_t mask;
-	uint64_t flags;
-	unsigned int handle;
-	unsigned int hint;
-	unsigned int fence_class;
-	unsigned int desired_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t presumed_offset;
-};
-
-struct drm_bo_create_req {
-	uint64_t flags;
-	uint64_t size;
-	uint64_t buffer_start;
-	unsigned int hint;
-	unsigned int page_alignment;
-};
-
-
-/*
- * Reply flags
- */
-
-#define DRM_BO_REP_BUSY 0x00000001
-
-struct drm_bo_info_rep {
-	uint64_t flags;
-	uint64_t proposed_flags;
-	uint64_t size;
-	uint64_t offset;
-	uint64_t arg_handle;
-	uint64_t buffer_start;
-	unsigned int handle;
-	unsigned int fence_flags;
-	unsigned int rep_flags;
-	unsigned int page_alignment;
-	unsigned int desired_tile_stride;
-	unsigned int hw_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t expand_pad[4]; /*Future expansion */
-};
-
-struct drm_bo_arg_rep {
-	struct drm_bo_info_rep bo_info;
-	int ret;
-	unsigned int pad64;
-};
-
-struct drm_bo_create_arg {
-	union {
-		struct drm_bo_create_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_handle_arg {
-	unsigned int handle;
-};
-
-struct drm_bo_reference_info_arg {
-	union {
-		struct drm_bo_handle_arg req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_map_wait_idle_arg {
-	union {
-		struct drm_bo_info_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_op_req {
-	enum {
-		drm_bo_validate,
-		drm_bo_fence,
-		drm_bo_ref_fence,
-	} op;
-	unsigned int arg_handle;
-	struct drm_bo_info_req bo_req;
-};
-
-
-struct drm_bo_op_arg {
-	uint64_t next;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-	int handled;
-	unsigned int pad64;
-};
-
-
-#define DRM_BO_MEM_LOCAL 0
-#define DRM_BO_MEM_TT 1
-#define DRM_BO_MEM_VRAM 2
-#define DRM_BO_MEM_PRIV0 3
-#define DRM_BO_MEM_PRIV1 4
-#define DRM_BO_MEM_PRIV2 5
-#define DRM_BO_MEM_PRIV3 6
-#define DRM_BO_MEM_PRIV4 7
-
-#define DRM_BO_MEM_TYPES 8 /* For now. */
-
-#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
-#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
-
-struct drm_bo_version_arg {
-	uint32_t major;
-	uint32_t minor;
-	uint32_t patchlevel;
-};
-
-struct drm_mm_type_arg {
-	unsigned int mem_type;
-	unsigned int lock_flags;
-};
-
-struct drm_mm_init_arg {
-	unsigned int magic;
-	unsigned int major;
-	unsigned int minor;
-	unsigned int mem_type;
-	uint64_t p_offset;
-	uint64_t p_size;
-};
-
-struct drm_mm_info_arg {
-	unsigned int mem_type;
-	uint64_t p_size;
-};
-
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
 struct drm_gem_close {
 	/** Handle of the object to be closed. */
-	uint32_t handle;
-	uint32_t pad;
+	__u32 handle;
+	__u32 pad;
 };
 
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
 struct drm_gem_flink {
 	/** Handle for the object being named */
-	uint32_t handle;
+	__u32 handle;
 
 	/** Returned global name */
-	uint32_t name;
+	__u32 name;
 };
 
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
 struct drm_gem_open {
 	/** Name of object being opened */
-	uint32_t name;
+	__u32 name;
 
 	/** Returned handle for the object */
-	uint32_t handle;
-	
+	__u32 handle;
+
 	/** Returned size of the object */
-	uint64_t size;
+	__u64 size;
 };
 
+/** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
-	uint64_t capability;
-	uint64_t value;
+	__u64 capability;
+	__u64 value;
 };
 
-struct drm_event {
-	uint32_t type;
-	uint32_t length;
-};
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+	__u32 handle;
 
-#define DRM_EVENT_VBLANK 0x01
-#define DRM_EVENT_FLIP_COMPLETE 0x02
+	/** Flags.. only applicable for handle->fd */
+	__u32 flags;
 
-struct drm_event_vblank {
-	struct drm_event base;
-	uint64_t user_data;
-	uint32_t tv_sec;
-	uint32_t tv_usec;
-	uint32_t sequence;
-	uint32_t reserved;
+	/** Returned dmabuf file descriptor */
+	__s32 fd;
 };
 
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#include <dev/drm2/drm_mode.h>
 
-#include "drm_mode.h"
-
-/**
- * \name Ioctls Definitions
- */
-/*@{*/
-
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
@@ -1039,12 +649,10 @@
 #define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08,  struct drm_modeset_ctl)
-
+#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
 #define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
-
 #define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
@@ -1062,7 +670,7 @@
 #define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)
 
 #define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
-#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
 
 #define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
 #define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)
@@ -1081,7 +689,8 @@
 #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
 #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
 
-#define DRM_IOCTL_GEM_PRIME_OPEN        DRM_IOWR(0x2e, struct drm_gem_open)
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
 
 #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
 #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
@@ -1097,7 +706,7 @@
 
 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
 
-#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
+#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
 
 #define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
 #define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
@@ -1119,41 +728,16 @@
 #define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
 #define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
 
-#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
-#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
-#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
 #define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
-#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
-#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
 #define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
 
-#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
-#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)
-
-#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
-
-#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
-#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
-#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)
-
-/*@}*/
-
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x99.
@@ -1163,11 +747,51 @@
  * drmCommandReadWrite().
  */
 #define DRM_COMMAND_BASE                0x40
-#define DRM_COMMAND_END                 0xA0
+#define DRM_COMMAND_END			0xA0
 
+/**
+ * Header for events written back to userspace on the drm fd.  The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event.  A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+	__u32 type;
+	__u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+	struct drm_event base;
+	__u64 user_data;
+	__u32 tv_sec;
+	__u32 tv_usec;
+	__u32 sequence;
+	__u32 reserved;
+};
+
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
 typedef struct drm_tex_region drm_tex_region_t;
 typedef struct drm_hw_lock drm_hw_lock_t;
 typedef struct drm_version drm_version_t;
@@ -1201,16 +825,12 @@
 typedef struct drm_auth drm_auth_t;
 typedef struct drm_irq_busid drm_irq_busid_t;
 typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
 typedef struct drm_agp_buffer drm_agp_buffer_t;
 typedef struct drm_agp_binding drm_agp_binding_t;
 typedef struct drm_agp_info drm_agp_info_t;
 typedef struct drm_scatter_gather drm_scatter_gather_t;
 typedef struct drm_set_version drm_set_version_t;
-
-typedef struct drm_fence_arg drm_fence_arg_t;
-typedef struct drm_mm_type_arg drm_mm_type_arg_t;
-typedef struct drm_mm_init_arg drm_mm_init_arg_t;
-typedef enum drm_bo_type drm_bo_type_t;
 #endif
 
 #endif
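
The drm_event comment added above pins down the read(2) contract on the
drm fd: only whole events are ever returned, and each record is
self-describing through its length field. A minimal userspace sketch of
walking such a buffer, assuming nothing beyond what this header states
(event dispatch is left as a stub):

#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct drm_event {			/* mirrors the header's layout */
	uint32_t type;
	uint32_t length;		/* total length, header included */
};

static void
drain_drm_events(int fd)
{
	char buf[1024];
	ssize_t n = read(fd, buf, sizeof(buf));
	ssize_t off = 0;

	/* The kernel never returns a partial event, so each record
	 * can be skipped by its own length field. */
	while (n > 0 && off + (ssize_t)sizeof(struct drm_event) <= n) {
		struct drm_event ev;

		memcpy(&ev, buf + off, sizeof(ev));
		if (ev.length < sizeof(ev))
			break;		/* malformed; bail out */
		/* switch (ev.type): DRM_EVENT_VBLANK,
		 * DRM_EVENT_FLIP_COMPLETE, ... */
		off += ev.length;
	}
}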

Modified: trunk/sys/dev/drm2/drmP.h
===================================================================
--- trunk/sys/dev/drm2/drmP.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drmP.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,10 +1,16 @@
 /* $MidnightBSD$ */
-/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
- * Created: Mon Jan  4 10:05:05 1999 by faith at precisioninsight.com
+/**
+ * \file drmP.h
+ * Private header for Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
  */
-/*-
+
+/*
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
  * All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,15 +31,10 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drmP.h 249080 2013-04-04 05:36:11Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drmP.h 305554 2016-09-07 18:53:46Z dim $");
 
 #ifndef _DRM_P_H_
 #define _DRM_P_H_
@@ -40,9 +41,6 @@
 
 #if defined(_KERNEL) || defined(__KERNEL__)
 
-struct drm_device;
-struct drm_file;
-
 #include <sys/param.h>
 #include <sys/queue.h>
 #include <sys/malloc.h>
@@ -59,6 +57,7 @@
 #include <sys/fcntl.h>
 #include <sys/uio.h>
 #include <sys/filio.h>
+#include <sys/rwlock.h>
 #include <sys/selinfo.h>
 #include <sys/sysctl.h>
 #include <sys/bus.h>
@@ -99,15 +98,34 @@
 #include <sys/bus.h>
 
 #include <dev/drm2/drm.h>
+#include <dev/drm2/drm_sarea.h>
+
 #include <dev/drm2/drm_atomic.h>
-#include <dev/drm2/drm_internal.h>
 #include <dev/drm2/drm_linux_list.h>
 #include <dev/drm2/drm_gem_names.h>
+
+#include <dev/drm2/drm_os_freebsd.h>
+
+#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
+#define __OS_HAS_AGP 1
+#else
+#define __OS_HAS_AGP 0
+#endif
+#if defined(CONFIG_MTRR)
+#define __OS_HAS_MTRR 1
+#else
+#define __OS_HAS_MTRR 0
+#endif
+
+struct drm_file;
+struct drm_device;
+
+#include <dev/drm2/drm_hashtab.h>
 #include <dev/drm2/drm_mm.h>
-#include <dev/drm2/drm_hashtab.h>
 
 #include "opt_compat.h"
 #include "opt_drm.h"
+#include "opt_syscons.h"
 #ifdef DRM_DEBUG
 #undef DRM_DEBUG
 #define DRM_DEBUG_DEFAULT_ON 1
@@ -120,6 +138,10 @@
 #undef DRM_LINUX
 #define DRM_LINUX 0
 
+/***********************************************************************/
+/** \name DRM template customization defaults */
+/*@{*/
+
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP     0x1
 #define DRIVER_REQUIRE_AGP 0x2
@@ -135,317 +157,193 @@
 #define DRIVER_IRQ_VBL2    0x800
 #define DRIVER_GEM         0x1000
 #define DRIVER_MODESET     0x2000
-#define DRIVER_USE_PLATFORM_DEVICE  0x4000
-#define	DRIVER_LOCKLESS_IRQ 0x8000
+#define DRIVER_PRIME       0x4000
 
+#define DRIVER_BUS_PCI 0x1
+#define DRIVER_BUS_PLATFORM 0x2
+#define DRIVER_BUS_USB 0x3
 
-#define DRM_HASH_SIZE	      16 /* Size of key hash table		  */
-#define DRM_KERNEL_CONTEXT    0	 /* Change drm_resctx if changed	  */
-#define DRM_RESERVED_CONTEXTS 1	 /* Change drm_resctx if changed	  */
+/***********************************************************************/
+/** \name Begin the DRM... */
+/*@{*/
 
-#define	DRM_GEM_MAPPING_MASK	(3ULL << 62)
-#define	DRM_GEM_MAPPING_KEY	(2ULL << 62) /* Non-canonical address form */
-#define	DRM_GEM_MAX_IDX		0x3fffff
-#define	DRM_GEM_MAPPING_IDX(o)	(((o) >> 40) & DRM_GEM_MAX_IDX)
-#define	DRM_GEM_MAPPING_OFF(i)	(((uint64_t)(i)) << 40)
-#define	DRM_GEM_MAPPING_MAPOFF(o) \
-    ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY))
+#define DRM_DEBUG_CODE 2	  /**< Include debugging code if > 1, then
+				     also include looping detection. */
 
-MALLOC_DECLARE(DRM_MEM_DMA);
-MALLOC_DECLARE(DRM_MEM_SAREA);
-MALLOC_DECLARE(DRM_MEM_DRIVER);
-MALLOC_DECLARE(DRM_MEM_MAGIC);
-MALLOC_DECLARE(DRM_MEM_IOCTLS);
-MALLOC_DECLARE(DRM_MEM_MAPS);
-MALLOC_DECLARE(DRM_MEM_BUFS);
-MALLOC_DECLARE(DRM_MEM_SEGS);
-MALLOC_DECLARE(DRM_MEM_PAGES);
-MALLOC_DECLARE(DRM_MEM_FILES);
-MALLOC_DECLARE(DRM_MEM_QUEUES);
-MALLOC_DECLARE(DRM_MEM_CMDS);
-MALLOC_DECLARE(DRM_MEM_MAPPINGS);
-MALLOC_DECLARE(DRM_MEM_BUFLISTS);
-MALLOC_DECLARE(DRM_MEM_AGPLISTS);
-MALLOC_DECLARE(DRM_MEM_CTXBITMAP);
-MALLOC_DECLARE(DRM_MEM_SGLISTS);
-MALLOC_DECLARE(DRM_MEM_DRAWABLE);
-MALLOC_DECLARE(DRM_MEM_MM);
-MALLOC_DECLARE(DRM_MEM_HASHTAB);
-MALLOC_DECLARE(DRM_MEM_KMS);
+#define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
+#define DRM_KERNEL_CONTEXT    0	 /**< Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1	 /**< Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT     5000000
+#define DRM_TIME_SLICE	      (HZ/20)  /**< Time slice for GLXContexts */
+#define DRM_LOCK_SLICE	      1	/**< Time slice for lock, in jiffies */
 
-SYSCTL_DECL(_hw_drm);
+#define DRM_FLAG_DEBUG	  0x01
 
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
 
-				/* Internal types and structures */
-#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
-#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
+/*@}*/
 
-#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+/***********************************************************************/
+/** \name Macros to make printk easier */
+/*@{*/
 
-#define __OS_HAS_AGP	1
-
-#define DRM_DEV_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID	0
-#define DRM_DEV_GID	0
-
-#define wait_queue_head_t	atomic_t
-#define DRM_WAKEUP(w)		wakeup((void *)w)
-#define DRM_WAKEUP_INT(w)	wakeup(w)
-#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
-
-#define DRM_CURPROC		curthread
-#define DRM_STRUCTPROC		struct thread
-#define DRM_SPINTYPE		struct mtx
-#define DRM_SPININIT(l,name)	mtx_init(l, name, NULL, MTX_DEF)
-#define DRM_SPINUNINIT(l)	mtx_destroy(l)
-#define DRM_SPINLOCK(l)		mtx_lock(l)
-#define DRM_SPINUNLOCK(u)	mtx_unlock(u)
-#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do {		\
-	mtx_lock(l);					\
-	(void)irqflags;					\
-} while (0)
-#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
-#define DRM_SPINLOCK_ASSERT(l)	mtx_assert(l, MA_OWNED)
-#define DRM_CURRENTPID		curthread->td_proc->p_pid
-#define DRM_LOCK(dev)		sx_xlock(&(dev)->dev_struct_lock)
-#define DRM_UNLOCK(dev) 	sx_xunlock(&(dev)->dev_struct_lock)
-#define	DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout)			\
-    (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout)))
-#if defined(INVARIANTS)
-#define	DRM_LOCK_ASSERT(dev)	sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED)
-#define	DRM_UNLOCK_ASSERT(dev)	sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED)
-#else
-#define	DRM_LOCK_ASSERT(d)
-#define	DRM_UNLOCK_ASSERT(d)
-#endif
-
-#define DRM_SYSCTL_HANDLER_ARGS	(SYSCTL_HANDLER_ARGS)
-
-#define DRM_IRQ_ARGS		void *arg
-typedef void			irqreturn_t;
-#define IRQ_HANDLED		/* nothing */
-#define IRQ_NONE		/* nothing */
-
-#define unlikely(x)            __builtin_expect(!!(x), 0)
-#define likely(x)              __builtin_expect(!!(x), 1)
-#define container_of(ptr, type, member) ({			\
-	__typeof( ((type *)0)->member ) *__mptr = (ptr);	\
-	(type *)( (char *)__mptr - offsetof(type,member) );})
-
-enum {
-	DRM_IS_NOT_AGP,
-	DRM_IS_AGP,
-	DRM_MIGHT_BE_AGP
-};
-#define DRM_AGP_MEM		struct agp_memory_info
-
-#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1)
-
-#define PAGE_ALIGN(addr) round_page(addr)
-/* DRM_SUSER returns true if the user is superuser */
-#define DRM_SUSER(p)		(priv_check(p, PRIV_DRIVER) == 0)
-#define DRM_AGP_FIND_DEVICE()	agp_find_device()
-#define DRM_MTRR_WC		MDF_WRITECOMBINE
-#define jiffies			ticks
-#define	jiffies_to_msecs(x)	(((int64_t)(x)) * 1000 / hz)
-#define	msecs_to_jiffies(x)	(((int64_t)(x)) * hz / 1000)
-#define	time_after(a,b)		((long)(b) - (long)(a) < 0)
-#define	time_after_eq(a,b)	((long)(b) - (long)(a) <= 0)
-#define drm_msleep(x, msg)	pause((msg), ((int64_t)(x)) * hz / 1000)
-
-typedef vm_paddr_t dma_addr_t;
-typedef uint64_t u64;
-typedef uint32_t u32;
-typedef uint16_t u16;
-typedef uint8_t u8;
-typedef int64_t s64;
-typedef int32_t s32;
-typedef int16_t s16;
-typedef int8_t s8;
-
-/* DRM_READMEMORYBARRIER() prevents reordering of reads.
- * DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
- * DRM_MEMORYBARRIER() prevents reordering of reads and writes.
+/**
+ * Error output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
  */
-#define DRM_READMEMORYBARRIER()		rmb()
-#define DRM_WRITEMEMORYBARRIER()	wmb()
-#define DRM_MEMORYBARRIER()		mb()
-
-#define DRM_READ8(map, offset)						\
-	*(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) +		\
-	    (vm_offset_t)(offset))
-#define DRM_READ16(map, offset)						\
-	le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) +	\
-	    (vm_offset_t)(offset)))
-#define DRM_READ32(map, offset)						\
-	le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +	\
-	    (vm_offset_t)(offset)))
-#define DRM_READ64(map, offset)						\
-	le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +	\
-	    (vm_offset_t)(offset)))
-#define DRM_WRITE8(map, offset, val)					\
-	*(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) +		\
-	    (vm_offset_t)(offset)) = val
-#define DRM_WRITE16(map, offset, val)					\
-	*(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) +		\
-	    (vm_offset_t)(offset)) = htole16(val)
-#define DRM_WRITE32(map, offset, val)					\
-	*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +		\
-	    (vm_offset_t)(offset)) = htole32(val)
-#define DRM_WRITE64(map, offset, val)					\
-	*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +		\
-	    (vm_offset_t)(offset)) = htole64(val)
-
-#define DRM_VERIFYAREA_READ( uaddr, size )		\
-	(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
-
-#define DRM_COPY_TO_USER(user, kern, size) \
-	copyout(kern, user, size)
-#define DRM_COPY_FROM_USER(kern, user, size) \
-	copyin(user, kern, size)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) 	\
-	copyin(arg2, arg1, arg3)
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
-	copyout(arg2, arg1, arg3)
-#define DRM_GET_USER_UNCHECKED(val, uaddr)		\
-	((val) = fuword32(uaddr), 0)
-
-#define cpu_to_le32(x) htole32(x)
-#define le32_to_cpu(x) le32toh(x)
-
-#define DRM_HZ			hz
-#define DRM_UDELAY(udelay)	DELAY(udelay)
-#define DRM_TIME_SLICE		(hz/20)  /* Time slice for GLXContexts	  */
-
-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {	\
-	(_map) = (_dev)->context_sareas[_ctx];		\
-} while(0)
-
-#define LOCK_TEST_WITH_RETURN(dev, file_priv)				\
-do {									\
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||		\
-	     dev->lock.file_priv != file_priv) {			\
-		DRM_ERROR("%s called without lock held\n",		\
-			   __FUNCTION__);				\
-		return EINVAL;						\
-	}								\
-} while (0)
-
-/* Returns -errno to shared code */
-#define DRM_WAIT_ON( ret, queue, timeout, condition )		\
-for ( ret = 0 ; !ret && !(condition) ; ) {			\
-	DRM_UNLOCK(dev);						\
-	mtx_lock(&dev->irq_lock);				\
-	if (!(condition))					\
-	    ret = -mtx_sleep(&(queue), &dev->irq_lock, 		\
-		PCATCH, "drmwtq", (timeout));			\
-	mtx_unlock(&dev->irq_lock);				\
-	DRM_LOCK(dev);						\
-}
-
 #define DRM_ERROR(fmt, ...) \
 	printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt,		\
 	    DRM_CURRENTPID, __func__ , ##__VA_ARGS__)
 
+#define DRM_WARNING(fmt, ...)  printf("warning: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
 #define DRM_INFO(fmt, ...)  printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
 
+/**
+ * Debug output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
 #define DRM_DEBUG(fmt, ...) do {					\
-	if ((drm_debug_flag & DRM_DEBUGBITS_DEBUG) != 0)		\
+	if ((drm_debug & DRM_DEBUGBITS_DEBUG) != 0)			\
 		printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID,	\
 			__func__ , ##__VA_ARGS__);			\
 } while (0)
 
 #define DRM_DEBUG_KMS(fmt, ...) do {					\
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)			\
 		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
 			__func__ , ##__VA_ARGS__);			\
 } while (0)
 
 #define DRM_DEBUG_DRIVER(fmt, ...) do {					\
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)			\
 		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
 			__func__ , ##__VA_ARGS__);			\
 } while (0)
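
A quick illustration (not part of this change; mydrv_init_hw is a
hypothetical function) of how a driver would use the macros above, with
verbose output gated on the global drm_debug bit mask:

    /* Sketch: DRM_INFO always prints; DRM_DEBUG prints only when the
     * DRM_DEBUGBITS_DEBUG bit is set in drm_debug. */
    static int
    mydrv_init_hw(struct drm_device *dev)
    {
        DRM_INFO("initializing hardware\n");
        DRM_DEBUG("pci %d:%d:%d\n", dev->pci_bus, dev->pci_slot,
            dev->pci_func);
        if (dev->irq == 0) {
            DRM_ERROR("no interrupt assigned\n");
            return -EINVAL;
        }
        return 0;
    }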
 
-typedef struct drm_pci_id_list
-{
-	int vendor;
-	int device;
-	long driver_private;
-	char *name;
-} drm_pci_id_list_t;
+/*@}*/
 
-struct drm_msi_blacklist_entry
-{
-	int vendor;
-	int device;
-};
+/***********************************************************************/
+/** \name Internal types and structures */
+/*@{*/
 
+#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
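
To make the ring arithmetic above concrete (an illustrative reading; the
rp/wp/count fields belong to the legacy DMA ring these macros are applied
to): with count = 8, rp = 2 and wp = 5, DRM_LEFTCOUNT gives
(2 + 8 - 5) % 9 = 5 free slots and DRM_BUFCOUNT gives 8 - 5 = 3 buffers
in flight. Likewise, DRM_IF_VERSION(1, 4) packs to 0x10004.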
+
+/**
+ * Test that the hardware lock is held by the caller, returning otherwise.
+ *
+ * \param dev DRM device.
+ * \param filp file pointer of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
+do {										\
+	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
+	    _file_priv->master->lock.file_priv != _file_priv)	{		\
+		DRM_ERROR("%s called without lock held, held %d owner %p %p\n",\
+			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+			   _file_priv->master->lock.file_priv, _file_priv);	\
+		return -EINVAL;							\
+	}									\
+} while (0)
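
Typical use is at the top of an ioctl handler that touches hardware
protected by the HW lock; a minimal sketch (mydrv_flush_ioctl is
hypothetical):

    static int
    mydrv_flush_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv)
    {
        /* Returns -EINVAL unless file_priv's master holds the HW lock. */
        LOCK_TEST_WITH_RETURN(dev, file_priv);
        /* ... safe to touch the hardware here ... */
        return 0;
    }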
+
+/**
+ * Ioctl function type.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private pointer.
+ * \param cmd command.
+ * \param arg argument.
+ */
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+
+#define DRM_IOCTL_NR(n)                ((n) & 0xff)
+#define DRM_MAJOR       226
+
 #define DRM_AUTH	0x1
-#define DRM_MASTER	0x2
+#define	DRM_MASTER	0x2
 #define DRM_ROOT_ONLY	0x4
 #define DRM_CONTROL_ALLOW 0x8
 #define DRM_UNLOCKED	0x10
 
-typedef struct drm_ioctl_desc {
+struct drm_ioctl_desc {
 	unsigned long cmd;
-	int (*func)(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
 	int flags;
-} drm_ioctl_desc_t;
+	drm_ioctl_t *func;
+	unsigned int cmd_drv;
+};
+
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
  * ioctl, for use by drm_ioctl().
  */
-#define DRM_IOCTL_DEF(ioctl, func, flags) \
-	[DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
 
-typedef struct drm_magic_entry {
-	drm_magic_t	       magic;
-	struct drm_file	       *priv;
-	struct drm_magic_entry *next;
-} drm_magic_entry_t;
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
 
-typedef struct drm_magic_head {
-	struct drm_magic_entry *head;
-	struct drm_magic_entry *tail;
-} drm_magic_head_t;
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
+	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
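
For example, a driver ioctl table built with these helpers might look
like the sketch below (hypothetical: the DRM_MYDRV_* numbers and their
DRM_IOCTL_MYDRV_* request codes would come from the driver's own header):

    static struct drm_ioctl_desc mydrv_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MYDRV_INIT, mydrv_init_ioctl,
            DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(MYDRV_GETPARAM, mydrv_getparam_ioctl, DRM_AUTH),
    };

drm_ioctl() indexes such a table by DRM_IOCTL_NR() and checks the
DRM_AUTH, DRM_MASTER and DRM_ROOT_ONLY flags before dispatching to .func.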
 
-typedef struct drm_buf {
-	int		  idx;	       /* Index into master buflist	     */
-	int		  total;       /* Buffer size			     */
-	int		  order;       /* log-base-2(total)		     */
-	int		  used;	       /* Amount of buffer in use (for DMA)  */
-	unsigned long	  offset;      /* Byte offset (used internally)	     */
-	void		  *address;    /* Address of buffer		     */
-	unsigned long	  bus_address; /* Bus address of buffer		     */
-	struct drm_buf	  *next;       /* Kernel-only: used for free list    */
-	__volatile__ int  pending;     /* On hardware DMA queue		     */
-	struct drm_file   *file_priv;  /* Unique identifier of holding process */
-	int		  context;     /* Kernel queue for this buffer	     */
+struct drm_magic_entry {
+	struct list_head head;
+	struct drm_hash_item hash_item;
+	struct drm_file *priv;
+};
+
+/**
+ * DMA buffer.
+ */
+struct drm_buf {
+	int idx;		       /**< Index into master buflist */
+	int total;		       /**< Buffer size */
+	int order;		       /**< log-base-2(total) */
+	int used;		       /**< Amount of buffer in use (for DMA) */
+	unsigned long offset;	       /**< Byte offset (used internally) */
+	void *address;		       /**< Address of buffer */
+	unsigned long bus_address;     /**< Bus address of buffer */
+	struct drm_buf *next;	       /**< Kernel-only: used for free list */
+	__volatile__ int waiting;      /**< On kernel DMA queue */
+	__volatile__ int pending;      /**< On hardware DMA queue */
+	struct drm_file *file_priv;    /**< Private of holding file descr */
+	int context;		       /**< Kernel queue for this buffer */
+	int while_locked;	       /**< Dispatch this buffer while locked */
 	enum {
-		DRM_LIST_NONE	 = 0,
-		DRM_LIST_FREE	 = 1,
-		DRM_LIST_WAIT	 = 2,
-		DRM_LIST_PEND	 = 3,
-		DRM_LIST_PRIO	 = 4,
+		DRM_LIST_NONE = 0,
+		DRM_LIST_FREE = 1,
+		DRM_LIST_WAIT = 2,
+		DRM_LIST_PEND = 3,
+		DRM_LIST_PRIO = 4,
 		DRM_LIST_RECLAIM = 5
-	}		  list;	       /* Which list we're on		     */
+	} list;			       /**< Which list we're on */
 
-	int		  dev_priv_size; /* Size of buffer private stoarge   */
-	void		  *dev_private;  /* Per-buffer private storage       */
-} drm_buf_t;
+	int dev_priv_size;		 /**< Size of buffer private storage */
+	void *dev_private;		 /**< Per-buffer private storage */
+};
 
-typedef struct drm_freelist {
-	int		  initialized; /* Freelist in use		   */
-	atomic_t	  count;       /* Number of free buffers	   */
-	drm_buf_t	  *next;       /* End pointer			   */
+struct drm_freelist {
+	int initialized;	       /**< Freelist in use */
+	atomic_t count;		       /**< Number of free buffers */
+	struct drm_buf *next;	       /**< End pointer */
 
-	int		  low_mark;    /* Low water mark		   */
-	int		  high_mark;   /* High water mark		   */
-} drm_freelist_t;
+#ifdef FREEBSD_NOTYET
+	wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
+#endif /* defined(FREEBSD_NOTYET) */
+	int low_mark;		       /**< Low water mark */
+	int high_mark;		       /**< High water mark */
+#ifdef FREEBSD_NOTYET
+	atomic_t wfh;		       /**< If waiting for high mark */
+	spinlock_t lock;
+#endif /* defined(FREEBSD_NOTYET) */
+};
 
 typedef struct drm_dma_handle {
 	void *vaddr;
@@ -454,16 +352,19 @@
 	bus_dmamap_t map;
 } drm_dma_handle_t;
 
-typedef struct drm_buf_entry {
-	int		  buf_size;
-	int		  buf_count;
-	drm_buf_t	  *buflist;
-	int		  seg_count;
-	drm_dma_handle_t  **seglist;
-	int		  page_order;
+/**
+ * Buffer entry.  There is one of these for each buffer size order.
+ */
+struct drm_buf_entry {
+	int buf_size;			/**< size */
+	int buf_count;			/**< number of buffers */
+	struct drm_buf *buflist;		/**< buffer list */
+	int seg_count;
+	int page_order;
+	struct drm_dma_handle **seglist;
 
-	drm_freelist_t	  freelist;
-} drm_buf_entry_t;
+	struct drm_freelist freelist;
+};
 
 /* Event queued up for userspace to read */
 struct drm_pending_event {
@@ -475,143 +376,164 @@
 	void (*destroy)(struct drm_pending_event *event);
 };
 
-typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+/* initial implementation using a linked list - TODO: switch to hashtab */
+struct drm_prime_file_private {
+	struct list_head head;
+	struct mtx lock;
+};
+
 struct drm_file {
-	TAILQ_ENTRY(drm_file) link;
-	struct drm_device *dev;
-	int		  authenticated;
-	int		  master;
-	pid_t		  pid;
-	uid_t		  uid;
-	drm_magic_t	  magic;
-	unsigned long	  ioctl_count;
+	int authenticated;
+	pid_t pid;
+	uid_t uid;
+	drm_magic_t magic;
+	unsigned long ioctl_count;
+	struct list_head lhead;
+	struct drm_minor *minor;
+	unsigned long lock_count;
 
-	void		 *driver_priv;
+	void *driver_priv;
 	struct drm_gem_names object_names;
 
-	int		  is_master;
-	struct drm_master *masterp;
+	int is_master; /* this file private is a master for a minor */
+	struct drm_master *master; /* master this node is currently associated with
+				      N.B. not always minor->master */
+	struct list_head fbs;
 
-	struct list_head  fbs;
+	struct selinfo event_poll;
+	struct list_head event_list;
+	int event_space;
 
-	struct list_head  event_list;
-	int		  event_space;
-	struct selinfo	  event_poll;
+	struct drm_prime_file_private prime;
 };
 
-typedef struct drm_lock_data {
-	struct drm_hw_lock	*hw_lock;	/* Hardware lock		   */
-	struct drm_file   *file_priv;   /* Unique identifier of holding process (NULL is kernel)*/
-	int		  lock_queue;	/* Queue of blocked processes	   */
-	unsigned long	  lock_time;	/* Time of last lock in jiffies	   */
-} drm_lock_data_t;
+/**
+ * Lock data.
+ */
+struct drm_lock_data {
+	struct drm_hw_lock *hw_lock;	/**< Hardware lock */
+	/** Private of lock holder's file (NULL=kernel) */
+	struct drm_file *file_priv;
+	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
+	unsigned long lock_time;	/**< Time of last lock in jiffies */
+	struct mtx spinlock;
+	uint32_t kernel_waiters;
+	uint32_t user_waiters;
+	int idle_has_lock;
+};
 
-/* This structure, in the struct drm_device, is always initialized while the
- * device
- * is open.  dev->dma_lock protects the incrementing of dev->buf_use, which
- * when set marks that no further bufs may be allocated until device teardown
- * occurs (when the last open of the device has closed).  The high/low
- * watermarks of bufs are only touched by the X Server, and thus not
- * concurrently accessed, so no locking is needed.
+/**
+ * DMA data.
  */
-typedef struct drm_device_dma {
-	drm_buf_entry_t	  bufs[DRM_MAX_ORDER+1];
-	int		  buf_count;
-	drm_buf_t	  **buflist;	/* Vector of pointers info bufs	   */
-	int		  seg_count;
-	int		  page_count;
-	unsigned long	  *pagelist;
-	unsigned long	  byte_count;
+struct drm_device_dma {
+
+	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
+	int buf_count;			/**< total number of buffers */
+	struct drm_buf **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
+	int seg_count;
+	int page_count;			/**< number of pages */
+	unsigned long *pagelist;	/**< page list */
+	unsigned long byte_count;
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG  = 0x02
+		_DRM_DMA_USE_SG = 0x02,
+		_DRM_DMA_USE_FB = 0x04,
+		_DRM_DMA_USE_PCI_RO = 0x08
 	} flags;
-} drm_device_dma_t;
 
-typedef struct drm_agp_mem {
-	void               *handle;
-	unsigned long      bound; /* address */
-	int                pages;
-	struct drm_agp_mem *prev;
-	struct drm_agp_mem *next;
-} drm_agp_mem_t;
+};
 
-typedef struct drm_agp_head {
-	device_t	   agpdev;
-	struct agp_info    info;
-	const char         *chipset;
-	drm_agp_mem_t      *memory;
-	unsigned long      mode;
-	int                enabled;
-	int                acquired;
-	unsigned long      base;
-   	int 		   mtrr;
-	int		   cant_use_aperture;
-	unsigned long	   page_mask;
-} drm_agp_head_t;
+/**
+ * AGP memory entry.  Stored as a doubly linked list.
+ */
+struct drm_agp_mem {
+	unsigned long handle;		/**< handle */
+	DRM_AGP_MEM *memory;
+	unsigned long bound;		/**< address */
+	int pages;
+	struct list_head head;
+};
 
-typedef struct drm_sg_mem {
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+struct drm_agp_head {
+	DRM_AGP_KERN agp_info;		/**< AGP device information */
+	struct list_head memory;
+	unsigned long mode;		/**< AGP mode */
+	device_t bridge;
+	int enabled;			/**< whether the AGP bus has been enabled */
+	int acquired;			/**< whether the AGP device has been acquired */
+	unsigned long base;
+	int agp_mtrr;
+	int cant_use_aperture;
+};
+
+/**
+ * Scatter-gather memory.
+ */
+struct drm_sg_mem {
 	vm_offset_t vaddr;
 	vm_paddr_t *busaddr;
 	vm_pindex_t pages;
-} drm_sg_mem_t;
+};
 
+struct drm_sigdata {
+	int context;
+	struct drm_hw_lock *lock;
+};
+
+/**
+ * Kernel side of a mapping
+ */
 #define DRM_MAP_HANDLE_BITS	(sizeof(void *) == 4 ? 4 : 24)
 #define DRM_MAP_HANDLE_SHIFT	(sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)
-typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
 
-typedef struct drm_local_map {
-	unsigned long offset;	  /* Physical address (0 for SAREA)       */
-	unsigned long size;	  /* Physical size (bytes)                */
-	enum drm_map_type type;	  /* Type of memory mapped                */
-	enum drm_map_flags flags; /* Flags                                */
-	void *handle;		  /* User-space: "Handle" to pass to mmap */
-				  /* Kernel-space: kernel-virtual address */
-	int mtrr;		  /* Boolean: MTRR used                   */
+struct drm_local_map {
+	resource_size_t offset;	 /**< Requested physical address (0 for SAREA)*/
+	unsigned long size;	 /**< Requested physical size (bytes) */
+	enum drm_map_type type;	 /**< Type of memory to map */
+	enum drm_map_flags flags;	 /**< Flags */
+	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
+				 /**< Kernel-space: kernel-virtual address */
+	int mtrr;		 /**< MTRR slot used */
+
 				  /* Private data                         */
-	int rid;		  /* PCI resource ID for bus_space        */
-	void *virtual;		  /* Kernel-space: kernel-virtual address */
-	struct resource *bsr;
-	bus_space_tag_t bst;
-	bus_space_handle_t bsh;
 	drm_dma_handle_t *dmah;
-	TAILQ_ENTRY(drm_local_map) link;
-} drm_local_map_t;
+};
 
-struct drm_vblank_info {
-	wait_queue_head_t queue;	/* vblank wait queue */
-	atomic_t count;			/* number of VBLANK interrupts */
-					/* (driver must alloc the right number of counters) */
-	atomic_t refcount;		/* number of users of vblank interrupts */
-	u32 last;			/* protected by dev->vbl_lock, used */
-					/* for wraparound handling */
-	int enabled;			/* so we don't call enable more than */
-					/* once per disable */
-	int inmodeset;			/* Display driver is setting mode */
+typedef struct drm_local_map drm_local_map_t;
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+	struct list_head head;		/**< list head */
+	struct drm_hash_item hash;
+	struct drm_local_map *map;	/**< mapping */
+	uint64_t user_token;
+	struct drm_master *master;
+	struct drm_mm_node *file_offset_node;	/**< fake offset */
 };
 
-/* Size of ringbuffer for vblank timestamps. Just double-buffer
- * in initial implementation.
+/**
+ * Context handle list
  */
-#define DRM_VBLANKTIME_RBSIZE 2
+struct drm_ctx_list {
+	struct list_head head;		/**< list head */
+	drm_context_t handle;		/**< context handle */
+	struct drm_file *tag;		/**< associated fd private data */
+};
 
-/* Flags and return codes for get_vblank_timestamp() driver function. */
-#define DRM_CALLED_FROM_VBLIRQ 1
-#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
-#define DRM_VBLANKTIME_INVBL             (1 << 1)
-
-/* get_scanout_position() return flags */
-#define DRM_SCANOUTPOS_VALID        (1 << 0)
-#define DRM_SCANOUTPOS_INVBL        (1 << 1)
-#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
-
 /* location of GART table */
 #define DRM_ATI_GART_MAIN 1
 #define DRM_ATI_GART_FB   2
 
-#define DRM_ATI_GART_PCI  1
+#define DRM_ATI_GART_PCI 1
 #define DRM_ATI_GART_PCIE 2
-#define DRM_ATI_GART_IGP  3
+#define DRM_ATI_GART_IGP 3
 
 struct drm_ati_pcigart_info {
 	int gart_table_location;
@@ -619,29 +541,30 @@
 	void *addr;
 	dma_addr_t bus_addr;
 	dma_addr_t table_mask;
-	dma_addr_t member_mask;
 	struct drm_dma_handle *table_handle;
-	drm_local_map_t mapping;
+	struct drm_local_map mapping;
 	int table_size;
-	struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */
+	struct drm_dma_handle *dmah; /* handle for ATI PCIGART table FIXME */
 };
 
-typedef vm_paddr_t resource_size_t;
-
 /**
  * GEM specific mm private for tracking GEM objects
  */
 struct drm_gem_mm {
+	struct unrhdr *idxunr;
 	struct drm_open_hash offset_hash; /**< User token hash table for maps */
-	struct unrhdr *idxunr;
 };
 
+/**
+ * This structure defines a GEM object, the memory object the DRM uses
+ * for its buffer objects.
+ */
 struct drm_gem_object {
 	/** Reference count of this object */
 	u_int refcount;
 
 	/** Handle count of this object. Each handle also holds a reference */
-	u_int handle_count; /* number of handles on this object */
+	atomic_t handle_count; /* number of handles on this object */
 
 	/** Related drm device */
 	struct drm_device *dev;
@@ -649,6 +572,7 @@
 	/** File representing the shmem storage: filp in Linux parlance */
 	vm_object_t vm_obj;
 
+	/* Mapping info for this object */
 	bool on_map;
 	struct drm_hash_item map_list;
 
@@ -683,104 +607,301 @@
 	uint32_t pending_write_domain;
 
 	void *driver_private;
+
+#ifdef FREEBSD_NOTYET
+	/* dma buf exported from this GEM object */
+	struct dma_buf *export_dma_buf;
+
+	/* dma buf attachment backing this object */
+	struct dma_buf_attachment *import_attach;
+#endif /* FREEBSD_NOTYET */
 };
 
-#include "drm_crtc.h"
+#include <dev/drm2/drm_crtc.h>
 
-#ifndef DMA_BIT_MASK
-#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
-#endif
+/* per-master structure */
+struct drm_master {
 
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+	u_int refcount; /* refcount for this master */
 
-struct drm_driver_info {
-	int	(*load)(struct drm_device *, unsigned long flags);
-	int	(*firstopen)(struct drm_device *);
-	int	(*open)(struct drm_device *, struct drm_file *);
-	void	(*preclose)(struct drm_device *, struct drm_file *file_priv);
-	void	(*postclose)(struct drm_device *, struct drm_file *);
-	void	(*lastclose)(struct drm_device *);
-	int	(*unload)(struct drm_device *);
-	void	(*reclaim_buffers_locked)(struct drm_device *,
-					  struct drm_file *file_priv);
-	int	(*dma_ioctl)(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
-	void	(*dma_ready)(struct drm_device *);
-	int	(*dma_quiescent)(struct drm_device *);
-	int	(*dma_flush_block_and_flush)(struct drm_device *, int context,
-					     enum drm_lock_flags flags);
-	int	(*dma_flush_unblock)(struct drm_device *, int context,
-				     enum drm_lock_flags flags);
-	int	(*context_ctor)(struct drm_device *dev, int context);
-	int	(*context_dtor)(struct drm_device *dev, int context);
-	int	(*kernel_context_switch)(struct drm_device *dev, int old,
-					 int new);
-	int	(*kernel_context_switch_unlock)(struct drm_device *dev);
-	void	(*irq_preinstall)(struct drm_device *dev);
-	int	(*irq_postinstall)(struct drm_device *dev);
-	void	(*irq_uninstall)(struct drm_device *dev);
-	void	(*irq_handler)(DRM_IRQ_ARGS);
+	struct list_head head; /**< each minor contains a list of masters */
+	struct drm_minor *minor; /**< link back to minor we are a master for */
 
-	u32	(*get_vblank_counter)(struct drm_device *dev, int crtc);
-	int	(*enable_vblank)(struct drm_device *dev, int crtc);
-	void	(*disable_vblank)(struct drm_device *dev, int crtc);
-	int	(*get_scanout_position)(struct drm_device *dev, int crtc,
-		    int *vpos, int *hpos);
+	char *unique;			/**< Unique identifier: e.g., busid */
+	int unique_len;			/**< Length of unique field */
+	int unique_size;		/**< amount allocated */
 
-	int	(*get_vblank_timestamp)(struct drm_device *dev, int crtc,
-		    int *max_error, struct timeval *vblank_time,
-		    unsigned flags);
+	int blocked;			/**< Blocked due to VC switch? */
 
-	int	(*gem_init_object)(struct drm_gem_object *obj);
-	void	(*gem_free_object)(struct drm_gem_object *obj);
+	/** \name Authentication */
+	/*@{ */
+	struct drm_open_hash magiclist;
+	struct list_head magicfree;
+	/*@} */
 
-	struct cdev_pager_ops *gem_pager_ops;
+	struct drm_lock_data lock;	/**< Information on hardware lock */
 
-	int	(*dumb_create)(struct drm_file *file_priv,
-		    struct drm_device *dev, struct drm_mode_create_dumb *args);
-	int	(*dumb_map_offset)(struct drm_file *file_priv,
-		    struct drm_device *dev, uint32_t handle, uint64_t *offset);
-	int	(*dumb_destroy)(struct drm_file *file_priv,
-		    struct drm_device *dev, uint32_t handle);
+	void *driver_priv; /**< Private structure for driver to use */
+};
 
-	int	(*sysctl_init)(struct drm_device *dev,
-		    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
-	void	(*sysctl_cleanup)(struct drm_device *dev);
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
 
-	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL             (1 << 1)
 
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID        (1 << 0)
+#define DRM_SCANOUTPOS_INVBL        (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
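
A caller interpreting these bits might look like the following sketch
(illustrative only; get_scanout_position is the driver callback declared
below):

    static bool
    mydrv_in_vblank(struct drm_device *dev, int crtc)
    {
        int vpos, hpos, flags;

        flags = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
        if ((flags & DRM_SCANOUTPOS_VALID) == 0)
            return (false);    /* query failed; fall back to irq counting */
        /* If DRM_SCANOUTPOS_ACCURATE is clear, the position may be off
         * by a small, constant number of scanlines. */
        return ((flags & DRM_SCANOUTPOS_INVBL) != 0);
    }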
+
+struct drm_bus {
+	int bus_type;
+	int (*get_irq)(struct drm_device *dev);
+	void (*free_irq)(struct drm_device *dev);
+	const char *(*get_name)(struct drm_device *dev);
+	int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+	int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+			  struct drm_unique *unique);
+	int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+	/* hooks that are for PCI */
+	int (*agp_init)(struct drm_device *dev);
+};
+
+/**
+ * DRM driver structure. This structure represents the common code for
+ * a family of cards. There will be one drm_device for each card present
+ * in this family.
+ */
+struct drm_driver {
+	int (*load) (struct drm_device *, unsigned long flags);
+	int (*firstopen) (struct drm_device *);
+	int (*open) (struct drm_device *, struct drm_file *);
+	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+	void (*postclose) (struct drm_device *, struct drm_file *);
+	void (*lastclose) (struct drm_device *);
+	int (*unload) (struct drm_device *);
+	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+	int (*dma_quiescent) (struct drm_device *);
+	int (*context_dtor) (struct drm_device *dev, int context);
+
 	/**
+	 * get_vblank_counter - get raw hardware vblank counter
+	 * @dev: DRM device
+	 * @crtc: counter to fetch
+	 *
+	 * Driver callback for fetching a raw hardware vblank counter for @crtc.
+	 * If a device doesn't have a hardware counter, the driver can simply
+	 * return the value of drm_vblank_count. The DRM core will account for
+	 * missed vblank events while interrupts were disabled based on system
+	 * timestamps.
+	 *
+	 * Wraparound handling and loss of events due to modesetting is dealt
+	 * with in the DRM core code.
+	 *
+	 * RETURNS
+	 * Raw vblank counter value.
+	 */
+	u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+	/**
+	 * enable_vblank - enable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Enable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 *
+	 * RETURNS
+	 * Zero on success, appropriate errno if the given @crtc's vblank
+	 * interrupt cannot be enabled.
+	 */
+	int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
+	 * disable_vblank - disable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Disable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 */
+	void (*disable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
 	 * Called by \c drm_device_is_agp.  Typically used to determine if a
 	 * card is really attached to AGP or not.
 	 *
 	 * \param dev  DRM device handle
 	 *
-	 * \returns 
+	 * \returns
 	 * One of three values is returned depending on whether or not the
 	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
 	 * (return of 1), or may or may not be AGP (return of 2).
 	 */
-	int	(*device_is_agp) (struct drm_device * dev);
+	int (*device_is_agp) (struct drm_device *dev);
 
-	drm_ioctl_desc_t *ioctls;
+	/**
+	 * Called by vblank timestamping code.
+	 *
+	 * Return the current display scanout position from a crtc.
+	 *
+	 * \param dev  DRM device.
+	 * \param crtc Id of the crtc to query.
+	 * \param *vpos Target location for current vertical scanout position.
+	 * \param *hpos Target location for current horizontal scanout position.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * \return Flags, or'ed together as follows:
+	 *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+	 * DRM_SCANOUTPOS_INVBL = Inside vblank.
+	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that returned position may be offset by a constant
+	 * but unknown small number of scanlines wrt. real scanout position.
+	 *
+	 */
+	int (*get_scanout_position) (struct drm_device *dev, int crtc,
+				     int *vpos, int *hpos);
+
+	/**
+	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
+	 * timestamp when the most recent VBLANK interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after end of the VBLANK interval. If the
+	 * @crtc is currently inside VBLANK, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * \param dev DRM device handle.
+	 * \param crtc crtc for which timestamp should be returned.
+	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
+	 *                   Implementation should strive to provide timestamp
+	 *                   with an error of at most *max_error nanoseconds.
+	 *                   Returns true upper bound on error for timestamp.
+	 * \param *vblank_time Target location for returned vblank timestamp.
+	 * \param flags 0 = Defaults, no special treatment needed.
+	 * \param       DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+	 *	        irq handler. Some drivers need to apply some workarounds
+	 *              for gpu-specific vblank irq quirks if flag is set.
+	 *
+	 * \returns
+	 * Zero if timestamping isn't supported in current display mode or a
+	 * negative number on failure. A positive status code on success,
+	 * which describes how the vblank_time timestamp was computed.
+	 */
+	int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags);
+
+	/* these have to be filled in */
+
+	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+	void (*irq_preinstall) (struct drm_device *dev);
+	int (*irq_postinstall) (struct drm_device *dev);
+	void (*irq_uninstall) (struct drm_device *dev);
+	void (*set_version) (struct drm_device *dev,
+			     struct drm_set_version *sv);
+
+	/* Master routines */
+	int (*master_create)(struct drm_device *dev, struct drm_master *master);
+	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+	/**
+	 * master_set is called whenever the minor master is set.
+	 * master_drop is called whenever the minor master is dropped.
+	 */
+
+	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+			  bool from_open);
+	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+			    bool from_release);
+
+	/**
+	 * Driver-specific constructor for drm_gem_objects, to set up
+	 * obj->driver_private.
+	 *
+	 * Returns 0 on success.
+	 */
+	int (*gem_init_object) (struct drm_gem_object *obj);
+	void (*gem_free_object) (struct drm_gem_object *obj);
+	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+
+#ifdef FREEBSD_NOTYET
+	/* prime: */
+	/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+				uint32_t handle, uint32_t flags, int *prime_fd);
+	/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+				int prime_fd, uint32_t *handle);
+	/* export GEM -> dmabuf */
+	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags);
+	/* import dmabuf -> GEM */
+	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+#endif /* defined(FREEBSD_NOTYET) */
+
+	/* dumb alloc support */
+	int (*dumb_create)(struct drm_file *file_priv,
+			   struct drm_device *dev,
+			   struct drm_mode_create_dumb *args);
+	int (*dumb_map_offset)(struct drm_file *file_priv,
+			       struct drm_device *dev, uint32_t handle,
+			       uint64_t *offset);
+	int (*dumb_destroy)(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    uint32_t handle);
+
+	/* Driver private ops for this object */
+	struct cdev_pager_ops *gem_pager_ops;
+
+	int	(*sysctl_init)(struct drm_device *dev,
+		    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
+	void	(*sysctl_cleanup)(struct drm_device *dev);
+
+	int major;
+	int minor;
+	int patchlevel;
+	char *name;
+	char *desc;
+	char *date;
+
+	u32 driver_features;
+	int dev_priv_size;
+	struct drm_ioctl_desc *ioctls;
+	int num_ioctls;
+	struct drm_bus *bus;
 #ifdef COMPAT_FREEBSD32
-	drm_ioctl_desc_t *compat_ioctls;
-	int	*compat_ioctls_nr;
+	struct drm_ioctl_desc *compat_ioctls;
+	int *num_compat_ioctls;
 #endif
-	int	max_ioctl;
 
 	int	buf_priv_size;
+};
 
-	int	major;
-	int	minor;
-	int	patchlevel;
-	const char *name;		/* Simple driver name		   */
-	const char *desc;		/* Longer driver name		   */
-	const char *date;		/* Date of last major changes.	   */
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+#define DRM_MINOR_CONTROL 2
+#define DRM_MINOR_RENDER 3
 
-	u32 driver_features;
-};
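
A rough sketch of how a driver fills this structure in (hypothetical
names; only a fraction of the hooks shown), following the
drm_vblank_count() fallback suggested by the get_vblank_counter
documentation above:

    static u32
    mydrv_get_vblank_counter(struct drm_device *dev, int crtc)
    {
        /* No hardware counter on this part: let the core count. */
        return drm_vblank_count(dev, crtc);
    }

    static struct drm_driver mydrv_driver = {
        .driver_features    = DRIVER_HAVE_IRQ,    /* assumption */
        .load               = mydrv_load,
        .unload             = mydrv_unload,
        .get_vblank_counter = mydrv_get_vblank_counter,
        .enable_vblank      = mydrv_enable_vblank,
        .disable_vblank     = mydrv_disable_vblank,
        .irq_handler        = mydrv_irq_handler,
        .ioctls             = mydrv_ioctls,
        .num_ioctls         = DRM_ARRAY_SIZE(mydrv_ioctls),
        .name               = "mydrv",
        .desc               = "Hypothetical example driver",
        .date               = "20120527",
        .major              = 1,
        .minor              = 0,
    };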
-
 /**
  * DRM minor structure. This structure represents a drm minor number.
  */
@@ -787,6 +908,7 @@
 struct drm_minor {
 	int index;			/**< Minor device number */
 	int type;                       /**< Control or render */
+	struct cdev *device;		/**< Device number for mknod */
 	device_t kdev;			/**< OS device */
 	struct drm_device *dev;
 
@@ -793,6 +915,8 @@
 	struct drm_master *master; /* currently active master for this node */
 	struct list_head master_list;
 	struct drm_mode_group mode_group;
+
+	struct sigio *buf_sigio;	/* Processes waiting for SIGIO     */
 };
 
 /* mode specified on the command line */
@@ -810,6 +934,7 @@
 	enum drm_connector_force force;
 };
 
+
 struct drm_pending_vblank_event {
 	struct drm_pending_event base;
 	int pipe;
@@ -816,101 +941,80 @@
 	struct drm_event_vblank event;
 };
 
-/* Length for the array of resource pointers for drm_get_resource_*. */
-#define DRM_MAX_PCI_RESOURCE	6
-
-/** 
- * DRM device functions structure
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
  */
 struct drm_device {
-	struct drm_driver_info *driver;
-	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */
+	int if_version;			/**< Highest interface version set */
 
-	u_int16_t pci_device;		/* PCI device id */
-	u_int16_t pci_vendor;		/* PCI vendor id */
+	/** \name Locks */
+	/*@{ */
+	struct mtx count_lock;		/**< For inuse, drm_device::open_count, drm_device::buf_use */
+	struct sx dev_struct_lock;	/**< For others */
+	/*@} */
 
-	char		  *unique;	/* Unique identifier: e.g., busid  */
-	int		  unique_len;	/* Length of unique field	   */
-	device_t	  device;	/* Device instance from newbus     */
-	struct cdev	  *devnode;	/* Device number for mknod	   */
-	int		  if_version;	/* Highest interface version set */
+	/** \name Usage Counters */
+	/*@{ */
+	int open_count;			/**< Outstanding files open */
+	atomic_t ioctl_count;		/**< Outstanding IOCTLs pending */
+	atomic_t vma_count;		/**< Outstanding vma areas open */
+	int buf_use;			/**< Buffers in use -- cannot alloc */
+	atomic_t buf_alloc;		/**< Buffer allocation in progress */
+	/*@} */
 
-	int		  flags;	/* Flags to open(2)		   */
+	/** \name Performance counters */
+	/*@{ */
+	unsigned long counters;
+	enum drm_stat_type types[15];
+	atomic_t counts[15];
+	/*@} */
 
-				/* Locks */
-	struct mtx	  dma_lock;	/* protects dev->dma */
-	struct mtx	  irq_lock;	/* protects irq condition checks */
-	struct mtx	  dev_lock;	/* protects everything else */
-	struct sx	  dev_struct_lock;
-	DRM_SPINTYPE	  drw_lock;
+	struct list_head filelist;
 
-				/* Usage Counters */
-	int		  open_count;	/* Outstanding files open	   */
-	int		  buf_use;	/* Buffers in use -- cannot alloc  */
+	/** \name Memory management */
+	/*@{ */
+	struct list_head maplist;	/**< Linked list of regions */
+	int map_count;			/**< Number of mappable regions */
+	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 
-				/* Performance counters */
-	unsigned long     counters;
-	enum drm_stat_type	types[15];
-	atomic_t          counts[15];
+	/** \name Context handle management */
+	/*@{ */
+	struct list_head ctxlist;	/**< Linked list of context handles */
+	int ctx_count;			/**< Number of context handles */
+	struct mtx ctxlist_mutex;	/**< For ctxlist */
+	drm_local_map_t **context_sareas;
+	int max_context;
+	unsigned long *ctx_bitmap;
 
-				/* Authentication */
-	drm_file_list_t   files;
-	drm_magic_head_t  magiclist[DRM_HASH_SIZE];
+	/*@} */
 
-	/* Linked list of mappable regions. Protected by dev_lock */
-	drm_map_list_t	  maplist;
-	struct unrhdr	  *map_unrhdr;
+	/** \name DMA support */
+	/*@{ */
+	struct drm_device_dma *dma;		/**< Optional pointer for DMA support */
+	/*@} */
 
-	drm_local_map_t	  **context_sareas;
-	int		  max_context;
+	/** \name Context support */
+	/*@{ */
+	int irq_enabled;		/**< True if irq handler is enabled */
+	atomic_t context_flag;		/**< Context swapping flag */
+	atomic_t interrupt_flag;	/**< Interruption handler flag */
+	atomic_t dma_flag;		/**< DMA dispatch flag */
+	wait_queue_head_t context_wait;	/**< Processes waiting on ctx switch */
+	int last_checked;		/**< Last context checked for DMA */
+	int last_context;		/**< Last current context */
+	unsigned long last_switch;	/**< jiffies at last context switch */
+	/*@} */
 
-	drm_lock_data_t	  lock;		/* Information on hardware lock	   */
+	/** \name VBLANK IRQ support */
+	/*@{ */
 
-				/* DMA queues (contexts) */
-	drm_device_dma_t  *dma;		/* Optional pointer for DMA support */
-
-				/* Context support */
-	int		  irq;		/* Interrupt used by board	   */
-	int		  irq_enabled;	/* True if the irq handler is enabled */
-	int		  msi_enabled;	/* MSI enabled */
-	int		  irqrid;	/* Interrupt used by board */
-	struct resource   *irqr;	/* Resource for interrupt used by board	   */
-	void		  *irqh;	/* Handle from bus_setup_intr      */
-
-	/* Storage of resource pointers for drm_get_resource_* */
-	struct resource   *pcir[DRM_MAX_PCI_RESOURCE];
-	int		  pcirid[DRM_MAX_PCI_RESOURCE];
-
-	int		  pci_domain;
-	int		  pci_bus;
-	int		  pci_slot;
-	int		  pci_func;
-
-	atomic_t	  context_flag;	/* Context swapping flag	   */
-	int		  last_context;	/* Last current context		   */
-
-	int		  num_crtcs;
-
-	struct sigio      *buf_sigio;	/* Processes waiting for SIGIO     */
-
-				/* Sysctl support */
-	struct drm_sysctl_info *sysctl;
-	int		  sysctl_node_idx;
-
-	drm_agp_head_t    *agp;
-	drm_sg_mem_t      *sg;  /* Scatter gather memory */
-	atomic_t          *ctx_bitmap;
-	void		  *dev_private;
-	unsigned int	  agp_buffer_token;
-	drm_local_map_t   *agp_buffer_map;
-
-	struct drm_minor *control;		/**< Control node for card */
-	struct drm_minor *primary;		/**< render type primary screen head */
-
-	struct unrhdr	  *drw_unrhdr;
-	/* RB tree of drawable infos */
-	RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
-
+	/*
+	 * At load time, disabling the vblank interrupt won't be allowed since
+	 * old clients may not call the modeset ioctl and therefore misbehave.
+	 * Once the modeset ioctl *has* been called though, we can safely
+	 * disable them when unused.
+	 */
 	int vblank_disable_allowed;
 
 	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
@@ -928,21 +1032,85 @@
 
 	u32 max_vblank_count;           /**< size of vblank counter register */
 
+	/**
+	 * List of events
+	 */
 	struct list_head vblank_event_list;
-	struct mtx	 event_lock;
+	struct mtx event_lock;
 
+	/*@} */
+
+	struct drm_agp_head *agp;	/**< AGP data */
+
+	device_t dev;			/* Device instance from newbus */
+	uint16_t pci_device;		/* PCI device id */
+	uint16_t pci_vendor;		/* PCI vendor id */
+	uint16_t pci_subdevice;		/* PCI subsystem device id */
+	uint16_t pci_subvendor;		/* PCI subsystem vendor id */
+
+	struct drm_sg_mem *sg;	/**< Scatter gather memory */
+	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
+	void *dev_private;		/**< device private data */
+	void *mm_private;
+	struct drm_sigdata sigdata;	   /**< For block_all_signals */
+	sigset_t sigmask;
+
+	struct drm_driver *driver;
+	struct drm_local_map *agp_buffer_map;
+	unsigned int agp_buffer_token;
+	struct drm_minor *control;		/**< Control node for card */
+	struct drm_minor *primary;		/**< render type primary screen head */
+
         struct drm_mode_config mode_config;	/**< Current mode config */
 
-	/* GEM part */
-	struct sx	  object_name_lock;
+	/** \name GEM information */
+	/*@{ */
+	struct sx object_name_lock;
 	struct drm_gem_names object_names;
-	void		 *mm_private;
+	/*@} */
+	int switch_power_state;
 
+	atomic_t unplugged; /* device has been unplugged or gone away */
+
+				/* Locks */
+	struct mtx	  dma_lock;	/* protects dev->dma */
+	struct mtx	  irq_lock;	/* protects irq condition checks */
+
+				/* Context support */
+	int		  irq;		/* Interrupt used by board	   */
+	int		  msi_enabled;	/* MSI enabled */
+	int		  irqrid;	/* Interrupt used by board */
+	struct resource   *irqr;	/* Resource for interrupt used by board	   */
+	void		  *irqh;	/* Handle from bus_setup_intr      */
+
+	/* Storage of resource pointers for drm_get_resource_* */
+#define	DRM_MAX_PCI_RESOURCE	6
+	struct resource   *pcir[DRM_MAX_PCI_RESOURCE];
+	int		  pcirid[DRM_MAX_PCI_RESOURCE];
+	struct mtx	  pcir_lock;
+
+	int		  pci_domain;
+	int		  pci_bus;
+	int		  pci_slot;
+	int		  pci_func;
+
+				/* Sysctl support */
+	struct drm_sysctl_info *sysctl;
+	int		  sysctl_node_idx;
+
+	void		  *drm_ttm_bdev;
+
 	void *sysctl_private;
 	char busid_str[128];
 	int modesetting;
+
+	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */
 };
 
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -949,6 +1117,12 @@
 	return ((dev->driver->driver_features & feature) ? 1 : 0);
 }
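
For instance, GEM-only ioctl paths are conventionally guarded with it (a
sketch, assuming the DRIVER_GEM feature bit defined earlier in this
header):

    if (!drm_core_check_feature(dev, DRIVER_GEM))
        return -ENODEV;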
 
+static inline int drm_dev_to_irq(struct drm_device *dev)
+{
+	return dev->driver->bus->get_irq(dev);
+}
+
+
 #if __OS_HAS_AGP
 static inline int drm_core_has_AGP(struct drm_device *dev)
 {
@@ -958,130 +1132,176 @@
 #define drm_core_has_AGP(dev) (0)
 #endif
 
-enum dmi_field {
-        DMI_NONE,
-        DMI_BIOS_VENDOR,
-        DMI_BIOS_VERSION,
-        DMI_BIOS_DATE,
-        DMI_SYS_VENDOR,
-        DMI_PRODUCT_NAME,
-        DMI_PRODUCT_VERSION,
-        DMI_PRODUCT_SERIAL,
-        DMI_PRODUCT_UUID,
-        DMI_BOARD_VENDOR,
-        DMI_BOARD_NAME,
-        DMI_BOARD_VERSION,
-        DMI_BOARD_SERIAL,
-        DMI_BOARD_ASSET_TAG,
-        DMI_CHASSIS_VENDOR,
-        DMI_CHASSIS_TYPE,
-        DMI_CHASSIS_VERSION,
-        DMI_CHASSIS_SERIAL,
-        DMI_CHASSIS_ASSET_TAG,
-        DMI_STRING_MAX,
-};
+#if __OS_HAS_MTRR
+static inline int drm_core_has_MTRR(struct drm_device *dev)
+{
+	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
+}
 
-struct dmi_strmatch {
-	unsigned char slot;
-	char substr[79];
-};
+#define DRM_MTRR_WC		MDF_WRITECOMBINE
 
-struct dmi_system_id {
-        int (*callback)(const struct dmi_system_id *);
-        const char *ident;
-        struct dmi_strmatch matches[4];
-};
-#define	DMI_MATCH(a, b) {(a), (b)}
-bool dmi_check_system(const struct dmi_system_id *);
+int drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags);
+int drm_mtrr_del(int handle, unsigned long offset, unsigned long size, unsigned int flags);
 
-extern int	drm_debug_flag;
-extern int	drm_notyet_flag;
-extern unsigned int drm_vblank_offdelay;
-extern unsigned int drm_timestamp_precision;
+#else
+#define drm_core_has_MTRR(dev) (0)
 
-/* Device setup support (drm_drv.c) */
-int	drm_probe(device_t kdev, drm_pci_id_list_t *idlist);
-int	drm_attach(device_t kdev, drm_pci_id_list_t *idlist);
-int	drm_create_cdevs(device_t kdev);
-void	drm_close(void *data);
-int	drm_detach(device_t kdev);
+#define DRM_MTRR_WC		0
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+			       unsigned int flags)
+{
+	return 0;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+			       unsigned long size, unsigned int flags)
+{
+	return 0;
+}
+#endif
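
Usage pairs an add with a matching del (a sketch over a drm_local_map; on
configurations without MTRR support the stubs above turn both calls into
no-ops, and whether the return value is a handle or just a status differs
by platform, so it is simply handed back to drm_mtrr_del()):

    /* Request write-combining for a framebuffer aperture. */
    map->mtrr = drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC);
    /* ... use the mapping ... */
    drm_mtrr_del(map->mtrr, map->offset, map->size, DRM_MTRR_WC);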
+
+/******************************************************************/
+/** \name Internal function definitions */
+/*@{*/
+
+				/* Driver support (drm_drv.h) */
 d_ioctl_t drm_ioctl;
+extern int drm_lastclose(struct drm_device *dev);
+
+				/* Device support (drm_fops.h) */
+extern struct sx drm_global_mutex;
 d_open_t drm_open;
 d_read_t drm_read;
+extern void drm_release(void *data);
+
+				/* Mapping support (drm_vm.h) */
+d_mmap_t drm_mmap;
+int	drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
+	    vm_size_t size, struct vm_object **obj_res, int nprot);
 d_poll_t drm_poll;
-d_mmap_t drm_mmap;
-extern drm_local_map_t	*drm_getsarea(struct drm_device *dev);
 
-void drm_event_wakeup(struct drm_pending_event *e);
+				/* Memory management support (drm_memory.h) */
+extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
+extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+#ifdef FREEBSD_NOTYET
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+				       struct page **pages,
+				       unsigned long num_pages,
+				       uint32_t gtt_offset,
+				       uint32_t type);
+#endif /* FREEBSD_NOTYET */
+extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
-int drm_add_busid_modesetting(struct drm_device *dev,
-    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
+				/* Misc. IOCTL support (drm_ioctl.h) */
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+extern int drm_getunique(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_setunique(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_getmap(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_getclient(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_getstats(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_getcap(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_setversion(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+extern int drm_noop(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv);
 
-/* File operations helpers (drm_fops.c) */
-extern int		drm_open_helper(struct cdev *kdev, int flags, int fmt,
-					 DRM_STRUCTPROC *p,
-					struct drm_device *dev);
+				/* Context IOCTL support (drm_context.h) */
+extern int drm_resctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_addctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_modctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_getctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_newctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv);
 
-/* Memory management support (drm_memory.c) */
-void	drm_mem_init(void);
-void	drm_mem_uninit(void);
-void	*drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map);
-void	*drm_ioremap(struct drm_device *dev, drm_local_map_t *map);
-void	drm_ioremapfree(drm_local_map_t *map);
-int	drm_mtrr_add(unsigned long offset, size_t size, int flags);
-int	drm_mtrr_del(int handle, unsigned long offset, size_t size, int flags);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
-int	drm_context_switch(struct drm_device *dev, int old, int new);
-int	drm_context_switch_complete(struct drm_device *dev, int new);
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 
-int	drm_ctxbitmap_init(struct drm_device *dev);
-void	drm_ctxbitmap_cleanup(struct drm_device *dev);
-void	drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-int	drm_ctxbitmap_next(struct drm_device *dev);
+				/* Authentication IOCTL support (drm_auth.h) */
+extern int drm_getmagic(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 
-/* Locking IOCTL support (drm_lock.c) */
-int	drm_lock_take(struct drm_lock_data *lock_data,
-		      unsigned int context);
-int	drm_lock_transfer(struct drm_lock_data *lock_data,
-			  unsigned int context);
-int	drm_lock_free(struct drm_lock_data *lock_data,
-		      unsigned int context);
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
+void drm_clflush_virt_range(char *addr, unsigned long length);
 
-/* Buffer management support (drm_bufs.c) */
-unsigned long drm_get_resource_start(struct drm_device *dev,
-				     unsigned int resource);
-unsigned long drm_get_resource_len(struct drm_device *dev,
-				   unsigned int resource);
-void	drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
-int	drm_order(unsigned long size);
-int	drm_addmap(struct drm_device *dev, unsigned long offset,
-		   unsigned long size,
-		   enum drm_map_type type, enum drm_map_flags flags,
-		   drm_local_map_t **map_ptr);
-int	drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request);
-int	drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request);
-int	drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request);
+				/* Locking IOCTL support (drm_lock.h) */
+extern int drm_lock(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv);
+extern int drm_unlock(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
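
A sketch of the intended pairing (hypothetical caller; the lock data
lives on the master that owns the hardware lock):

    struct drm_lock_data *lock_data = &dev->primary->master->lock;

    drm_idlelock_take(lock_data);
    /* ... the HW lock is held idle; safe to quiesce the hardware ... */
    drm_idlelock_release(lock_data);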
 
-/* DMA support (drm_dma.c) */
-int	drm_dma_setup(struct drm_device *dev);
-void	drm_dma_takedown(struct drm_device *dev);
-void	drm_free_buffer(struct drm_device *dev, drm_buf_t *buf);
-void	drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv);
-#define drm_core_reclaim_buffers drm_reclaim_buffers
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
 
-/* IRQ support (drm_irq.c) */
-int	drm_irq_install(struct drm_device *dev);
-int	drm_irq_uninstall(struct drm_device *dev);
-irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
-void	drm_driver_irq_preinstall(struct drm_device *dev);
-void	drm_driver_irq_postinstall(struct drm_device *dev);
-void	drm_driver_irq_uninstall(struct drm_device *dev);
+extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
 
-void	drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-void	drm_vblank_post_modeset(struct drm_device *dev, int crtc);
-int 	drm_modeset_ctl(struct drm_device *dev, void *data,
+				/* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
+		      unsigned int size, enum drm_map_type type,
+		      enum drm_map_flags flags, struct drm_local_map **map_ptr);
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
+extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int drm_addbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+extern int drm_order(unsigned long size);
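
drm_order() returns the exponent of the smallest power of two that covers
the given size (so drm_order(4096) is 12 and drm_order(4097) is 13),
which is how requests are bucketed into drm_device_dma::bufs. A sketch,
where request is a struct drm_buf_desc:

    int order = drm_order(request->size);  /* 1 << order >= request->size */
    if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
        return -EINVAL;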
 
+				/* DMA support (drm_dma.h) */
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+				     struct drm_file *filp);
+
+				/* IRQ support (drm_irq.h) */
+extern int drm_control(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+extern int drm_irq_install(struct drm_device *dev);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
@@ -1089,8 +1309,9 @@
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+				     struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
-void drm_handle_vblank_events(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
@@ -1104,304 +1325,467 @@
 						 struct drm_crtc *refcrtc);
 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
 
-struct timeval ns_to_timeval(const int64_t nsec);
-int64_t timeval_to_ns(const struct timeval *tv);
+extern bool
+drm_mode_parse_command_line_for_connector(const char *mode_option,
+					  struct drm_connector *connector,
+					  struct drm_cmdline_mode *mode);
 
-/* AGP/PCI Express/GART support (drm_agpsupport.c) */
-int	drm_device_is_agp(struct drm_device *dev);
-int	drm_device_is_pcie(struct drm_device *dev);
-drm_agp_head_t *drm_agp_init(void);
-int	drm_agp_acquire(struct drm_device *dev);
-int	drm_agp_release(struct drm_device *dev);
-int	drm_agp_info(struct drm_device * dev, struct drm_agp_info *info);
-int	drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-void	*drm_agp_allocate_memory(size_t pages, u32 type);
-int	drm_agp_free_memory(void *handle);
-int	drm_agp_bind_memory(void *handle, off_t start);
-int	drm_agp_unbind_memory(void *handle);
-int	drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int	drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int	drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-int	drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+				  struct drm_cmdline_mode *cmd);
 
-/* Scatter Gather Support (drm_scatter.c) */
-void	drm_sg_cleanup(drm_sg_mem_t *entry);
-int	drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+/* Modesetting support */
+extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 
-/* sysctl support (drm_sysctl.h) */
-extern int		drm_sysctl_init(struct drm_device *dev);
-extern int		drm_sysctl_cleanup(struct drm_device *dev);
+				/* AGP/GART support (drm_agpsupport.h) */
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
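
Taken together, the non-ioctl entry points above follow the usual
bring-up order; a sketch with error handling elided (reusing the probed
mode is an assumption, the value is device-specific):

    struct drm_agp_mode mode;

    dev->agp = drm_agp_init(dev);
    if (dev->agp == NULL)
        return -ENODEV;
    if (drm_agp_acquire(dev) != 0)
        return -ENODEV;
    mode.mode = dev->agp->agp_info.ai_mode;    /* assumption: reuse probed mode */
    drm_agp_enable(dev, mode);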
 
-/* ATI PCIGART support (ati_pcigart.c) */
-int	drm_ati_pcigart_init(struct drm_device *dev,
-				struct drm_ati_pcigart_info *gart_info);
-int	drm_ati_pcigart_cleanup(struct drm_device *dev,
-				struct drm_ati_pcigart_info *gart_info);
+				/* Stub support (drm_stub.h) */
+extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
 
-/* Cache management (drm_memory.c) */
-void	drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
+extern void drm_put_dev(struct drm_device *dev);
+extern int drm_put_minor(struct drm_minor **minor);
+extern void drm_unplug_dev(struct drm_device *dev);
+extern unsigned int drm_debug;
+extern unsigned int drm_notyet;
 
-/* Locking IOCTL support (drm_drv.c) */
-int	drm_lock(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv);
-int	drm_unlock(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_version(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
-int	drm_setversion(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
-/* Misc. IOCTL support (drm_ioctl.c) */
-int	drm_irq_by_busid(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-int	drm_getunique(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
-int	drm_setunique(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
-int	drm_getmap(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_getclient(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
-int	drm_getstats(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_getcap(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_noop(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv);
+extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
 
-/* Context IOCTL support (drm_context.c) */
-int	drm_resctx(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_addctx(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_modctx(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_getctx(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_switchctx(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
-int	drm_newctx(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_rmctx(struct drm_device *dev, void *data,
-		  struct drm_file *file_priv);
-int	drm_setsareactx(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int	drm_getsareactx(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
 
-/* Drawable IOCTL support (drm_drawable.c) */
-int	drm_adddraw(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
-int	drm_rmdraw(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv);
-int	drm_update_draw(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
-						int handle);
+#ifdef FREEBSD_NOTYET
+extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+		int *prime_fd);
+extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
 
-/* Drawable support (drm_drawable.c) */
-void drm_drawable_free_all(struct drm_device *dev);
+extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
 
-/* Authentication IOCTL support (drm_auth.c) */
-int	drm_getmagic(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_authmagic(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
+extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, vm_page_t *pages,
+					    dma_addr_t *addrs, int max_pages);
+extern struct sg_table *drm_prime_pages_to_sg(vm_page_t *pages, int nr_pages);
+extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
-/* Buffer management support (drm_bufs.c) */
-int	drm_addmap_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-int	drm_rmmap_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int	drm_addbufs(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
-int	drm_infobufs(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_markbufs(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_freebufs(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv);
-int	drm_mapbufs(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
 
-/* DMA support (drm_dma.c) */
-int	drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv);
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
 
-/* IRQ support (drm_irq.c) */
-int	drm_control(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
+int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
+			 struct drm_gem_object **obj);
+#endif /* FREEBSD_NOTYET */
 
-/* AGP/GART support (drm_agpsupport.c) */
-int	drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-int	drm_agp_release_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-int	drm_agp_enable_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
-int	drm_agp_info_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-int	drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
-int	drm_agp_free_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-int	drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
-int	drm_agp_bind_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
+				/* Scatter Gather Support (drm_scatter.h) */
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
 
-				/* Stub support (drm_stub.h) */
-extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv);
-extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
+			       /* ATI PCIGART support (ati_pcigart.h) */
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+				struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+				   struct drm_ati_pcigart_info * gart_info);
 
-/* Scatter Gather Support (drm_scatter.c) */
-int	drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-int	drm_sg_free(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv);
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+				       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 
-/* consistent PCI memory functions (drm_pci.c) */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
-				size_t align, dma_addr_t maxaddr);
-void	drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
-
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
-
-int drm_gem_close_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int drm_gem_open_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
-int drm_gem_handle_create(struct drm_file *file_priv,
-			  struct drm_gem_object *obj,
-			  u32 *handlep);
-int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle);
-void drm_gem_object_handle_reference(struct drm_gem_object *obj);
-void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
-void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
-void drm_gem_object_reference(struct drm_gem_object *obj);
-void drm_gem_object_unreference(struct drm_gem_object *obj);
-void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct drm_gem_object *obj);
-int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-    size_t size);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+					    size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size);
 int drm_gem_private_object_init(struct drm_device *dev,
-    struct drm_gem_object *obj, size_t size);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-    size_t size);
-struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
-    struct drm_file *file_priv, uint32_t handle);
-
-void drm_gem_open(struct drm_device *dev, struct drm_file *file_priv);
-void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
-
-int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
-void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
-    struct vm_object **obj_res, int nprot);
+			struct drm_gem_object *obj, size_t size);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **obj_res, int nprot);
 void drm_gem_pager_dtr(void *obj);
 
-void drm_device_lock_mtx(struct drm_device *dev);
-void drm_device_unlock_mtx(struct drm_device *dev);
-int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
-    const char *msg, int timeout);
-void drm_device_assert_mtx_locked(struct drm_device *dev);
-void drm_device_assert_mtx_unlocked(struct drm_device *dev);
+#include <dev/drm2/drm_global.h>
 
-void drm_device_lock_struct(struct drm_device *dev);
-void drm_device_unlock_struct(struct drm_device *dev);
-int drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
-    const char *msg, int timeout);
-void drm_device_assert_struct_locked(struct drm_device *dev);
-void drm_device_assert_struct_unlocked(struct drm_device *dev);
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
 
-void drm_compat_locking_init(struct drm_device *dev);
-void drm_sleep_locking_init(struct drm_device *dev);
+	KASSERT(obj->refcount > 0, ("Dangling obj %p", obj));
+	refcount_acquire(&obj->refcount);
+}
 
-/* drm_modes.c */
-bool drm_mode_parse_command_line_for_connector(const char *mode_option,
-    struct drm_connector *connector, struct drm_cmdline_mode *mode);
-struct drm_display_mode *drm_mode_create_from_cmdline_mode(
-    struct drm_device *dev, struct drm_cmdline_mode *cmd);
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
 
-/* drm_edid.c */
-u8 *drm_find_cea_extension(struct edid *edid);
+	if (obj == NULL)
+		return;
+	if (refcount_release(&obj->refcount))
+		drm_gem_object_free(obj);
+}
 
-/* Inline replacements for drm_alloc and friends */
-static __inline__ void *
-drm_alloc(size_t size, struct malloc_type *area)
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-	return malloc(size, area, M_NOWAIT);
+	if (obj != NULL) {
+		struct drm_device *dev = obj->dev;
+		DRM_LOCK(dev);
+		drm_gem_object_unreference(obj);
+		DRM_UNLOCK(dev);
+	}
 }
 
-static __inline__ void *
-drm_calloc(size_t nmemb, size_t size, struct malloc_type *area)
+int drm_gem_handle_create(struct drm_file *file_priv,
+			  struct drm_gem_object *obj,
+			  u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+
+static inline void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
-	return malloc(size * nmemb, area, M_NOWAIT | M_ZERO);
+	drm_gem_object_reference(obj);
+	atomic_inc(&obj->handle_count);
 }
 
-static __inline__ void *
-drm_realloc(void *oldpt, size_t oldsize, size_t size,
-    struct malloc_type *area)
+static inline void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 {
-	return reallocf(oldpt, size, area, M_NOWAIT);
+	if (obj == NULL)
+		return;
+
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
+	drm_gem_object_unreference(obj);
 }
 
-static __inline__ void
-drm_free(void *pt, size_t size, struct malloc_type *area)
+static inline void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
-	free(pt, area);
+	if (obj == NULL)
+		return;
+
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
+	drm_gem_object_unreference_unlocked(obj);
 }
 
-/* Inline replacements for DRM_IOREMAP macros */
-static __inline__ void
-drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+					     struct drm_file *filp,
+					     u32 handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
+extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
+extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
+
+static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
+							 unsigned int token)
 {
-	map->virtual = drm_ioremap_wc(dev, map);
+	struct drm_map_list *_entry;
+	list_for_each_entry(_entry, &dev->maplist, head)
+	    if (_entry->user_token == token)
+		return _entry->map;
+	return NULL;
 }
-static __inline__ void
-drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+
+static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 {
-	map->virtual = drm_ioremap(dev, map);
 }
-static __inline__ void
-drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
-	if ( map->virtual && map->size )
-		drm_ioremapfree(map);
-}
 
-static __inline__ struct drm_local_map *
-drm_core_findmap(struct drm_device *dev, unsigned long offset)
-{
-	drm_local_map_t *map;
+extern int drm_fill_in_dev(struct drm_device *dev,
+			   struct drm_driver *driver);
+extern void drm_cancel_fill_in_dev(struct drm_device *dev);
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+/*@}*/
 
-	DRM_LOCK_ASSERT(dev);
-	TAILQ_FOREACH(map, &dev->maplist, link) {
-		if (offset == (unsigned long)map->handle)
-			return map;
-	}
-	return NULL;
-}
+/* PCI section */
+int drm_pci_device_is_agp(struct drm_device *dev);
+int drm_pci_device_is_pcie(struct drm_device *dev);
 
-static __inline__ void drm_core_dropmap(struct drm_map *map)
-{
+extern int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
+			   struct drm_driver *driver);
+
+#define DRM_PCIE_SPEED_25 1
+#define DRM_PCIE_SPEED_50 2
+#define DRM_PCIE_SPEED_80 4
+
+extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
+
+#define	drm_can_sleep()	(DRM_HZ & 1)
+
+/* FreeBSD specific -- should be moved to drm_os_freebsd.h */
+
+#define	DRM_GEM_MAPPING_MASK	(3ULL << 62)
+#define	DRM_GEM_MAPPING_KEY	(2ULL << 62) /* Non-canonical address form */
+#define	DRM_GEM_MAX_IDX		0x3fffff
+#define	DRM_GEM_MAPPING_IDX(o)	(((o) >> 40) & DRM_GEM_MAX_IDX)
+#define	DRM_GEM_MAPPING_OFF(i)	(((uint64_t)(i)) << 40)
+#define	DRM_GEM_MAPPING_MAPOFF(o) \
+    ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY))
+
+SYSCTL_DECL(_hw_drm);
+
+#define DRM_DEV_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID	0
+#define DRM_DEV_GID	0
+
+#define DRM_WAKEUP(w)		wakeup((void *)w)
+#define DRM_WAKEUP_INT(w)	wakeup(w)
+#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
+
+#define DRM_CURPROC		curthread
+#define DRM_STRUCTPROC		struct thread
+#define DRM_SPINTYPE		struct mtx
+#define DRM_SPININIT(l,name)	mtx_init(l, name, NULL, MTX_DEF)
+#define DRM_SPINUNINIT(l)	mtx_destroy(l)
+#define DRM_SPINLOCK(l)		mtx_lock(l)
+#define DRM_SPINUNLOCK(u)	mtx_unlock(u)
+#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do {		\
+	mtx_lock(l);					\
+	(void)irqflags;					\
+} while (0)
+#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
+#define DRM_SPINLOCK_ASSERT(l)	mtx_assert(l, MA_OWNED)
+#define	DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout)			\
+    (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout)))
+#if defined(INVARIANTS)
+#define	DRM_LOCK_ASSERT(dev)	sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED)
+#define	DRM_UNLOCK_ASSERT(dev)	sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED)
+#else
+#define	DRM_LOCK_ASSERT(d)
+#define	DRM_UNLOCK_ASSERT(d)
+#endif
+
+#define DRM_SYSCTL_HANDLER_ARGS	(SYSCTL_HANDLER_ARGS)
+
+enum {
+	DRM_IS_NOT_AGP,
+	DRM_IS_AGP,
+	DRM_MIGHT_BE_AGP
+};
+
+#define DRM_VERIFYAREA_READ( uaddr, size )		\
+	(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
+
+#define DRM_COPY_TO_USER(user, kern, size) \
+	copyout(kern, user, size)
+#define DRM_COPY_FROM_USER(kern, user, size) \
+	copyin(user, kern, size)
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) 	\
+	copyin(arg2, arg1, arg3)
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
+	copyout(arg2, arg1, arg3)
+#define DRM_GET_USER_UNCHECKED(val, uaddr)		\
+	((val) = fuword32(uaddr), 0)
+
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {	\
+	(_map) = (_dev)->context_sareas[_ctx];		\
+} while(0)
+
+/* Returns -errno to shared code */
+#define DRM_WAIT_ON( ret, queue, timeout, condition )		\
+for ( ret = 0 ; !ret && !(condition) ; ) {			\
+	DRM_UNLOCK(dev);					\
+	mtx_lock(&dev->irq_lock);				\
+	if (!(condition))					\
+	    ret = -mtx_sleep(&(queue), &dev->irq_lock, 		\
+		PCATCH, "drmwtq", (timeout));			\
+	if (ret == -ERESTART)					\
+	    ret = -ERESTARTSYS;					\
+	mtx_unlock(&dev->irq_lock);				\
+	DRM_LOCK(dev);						\
 }
 
-#define KIB_NOTYET()							\
-do {									\
-	if (drm_debug_flag && drm_notyet_flag)				\
-		printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
+#define	dev_err(dev, fmt, ...)						\
+	device_printf((dev), "error: " fmt, ## __VA_ARGS__)
+#define	dev_warn(dev, fmt, ...)						\
+	device_printf((dev), "warning: " fmt, ## __VA_ARGS__)
+#define	dev_info(dev, fmt, ...)						\
+	device_printf((dev), "info: " fmt, ## __VA_ARGS__)
+#define	dev_dbg(dev, fmt, ...) do {					\
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) {			\
+		device_printf((dev), "debug: " fmt, ## __VA_ARGS__);	\
+	}								\
 } while (0)
 
-#define	KTR_DRM		KTR_DEV
-#define	KTR_DRM_REG	KTR_SPARE3
+struct drm_msi_blacklist_entry
+{
+	int vendor;
+	int device;
+};
 
-#endif /* __KERNEL__ */
-#endif /* _DRM_P_H_ */
+struct drm_vblank_info {
+	wait_queue_head_t queue;	/* vblank wait queue */
+	atomic_t count;			/* number of VBLANK interrupts */
+					/* (driver must alloc the right number of counters) */
+	atomic_t refcount;		/* number of users of vblank interrupts */
+	u32 last;			/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	int enabled;			/* so we don't call enable more than */
+					/* once per disable */
+	int inmodeset;			/* Display driver is setting mode */
+};
+
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
+#endif
+
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+enum dmi_field {
+        DMI_NONE,
+        DMI_BIOS_VENDOR,
+        DMI_BIOS_VERSION,
+        DMI_BIOS_DATE,
+        DMI_SYS_VENDOR,
+        DMI_PRODUCT_NAME,
+        DMI_PRODUCT_VERSION,
+        DMI_PRODUCT_SERIAL,
+        DMI_PRODUCT_UUID,
+        DMI_BOARD_VENDOR,
+        DMI_BOARD_NAME,
+        DMI_BOARD_VERSION,
+        DMI_BOARD_SERIAL,
+        DMI_BOARD_ASSET_TAG,
+        DMI_CHASSIS_VENDOR,
+        DMI_CHASSIS_TYPE,
+        DMI_CHASSIS_VERSION,
+        DMI_CHASSIS_SERIAL,
+        DMI_CHASSIS_ASSET_TAG,
+        DMI_STRING_MAX,
+};
+
+struct dmi_strmatch {
+	unsigned char slot;
+	char substr[79];
+};
+
+struct dmi_system_id {
+        int (*callback)(const struct dmi_system_id *);
+        const char *ident;
+        struct dmi_strmatch matches[4];
+};
+#define	DMI_MATCH(a, b) {(a), (b)}
+bool dmi_check_system(const struct dmi_system_id *);
+
+/* Device setup support (drm_drv.c) */
+int	drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist);
+int	drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
+	    struct drm_driver *driver);
+int	drm_generic_detach(device_t kdev);
+
+void drm_event_wakeup(struct drm_pending_event *e);
+
+int drm_add_busid_modesetting(struct drm_device *dev,
+    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
+
+/* Buffer management support (drm_bufs.c) */
+unsigned long drm_get_resource_start(struct drm_device *dev,
+				     unsigned int resource);
+unsigned long drm_get_resource_len(struct drm_device *dev,
+				   unsigned int resource);
+
+/* IRQ support (drm_irq.c) */
+irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+void	drm_driver_irq_preinstall(struct drm_device *dev);
+void	drm_driver_irq_postinstall(struct drm_device *dev);
+void	drm_driver_irq_uninstall(struct drm_device *dev);
+
+/* sysctl support (drm_sysctl.h) */
+extern int		drm_sysctl_init(struct drm_device *dev);
+extern int		drm_sysctl_cleanup(struct drm_device *dev);
+
+int	drm_version(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv);
+
+/* consistent PCI memory functions (drm_pci.c) */
+int	drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+int	drm_pci_set_unique(struct drm_device *dev, struct drm_master *master,
+	    struct drm_unique *u);
+int	drm_pci_agp_init(struct drm_device *dev);
+int	drm_pci_enable_msi(struct drm_device *dev);
+void	drm_pci_disable_msi(struct drm_device *dev);
+
+struct ttm_bo_device;
+int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **obj_res, int nprot);
+struct ttm_buffer_object;
+void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
+
+#endif				/* __KERNEL__ */
+#endif
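
A quick illustration of how the GEM declarations above fit together: an
object carries a plain refcount plus a handle count, and a driver that
exposes an object to userspace is expected to drop its own reference once
drm_gem_handle_create() has taken one. The sketch below is illustrative
only; my_create_ioctl() is a hypothetical driver entry point, not part of
this commit.

static int
my_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	/* drm_gem_object_alloc() returns the object holding one reference. */
	obj = drm_gem_object_alloc(dev, PAGE_SIZE);
	if (obj == NULL)
		return (-ENOMEM);

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* The handle now holds a reference (or creation failed); drop ours. */
	drm_gem_object_unreference_unlocked(obj);
	if (ret != 0)
		return (ret);

	/* Hand the handle back through the ioctl's data block (sketch). */
	*(u32 *)data = handle;
	return (0);
}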

Modified: trunk/sys/dev/drm2/drm_agpsupport.c
===================================================================
--- trunk/sys/dev/drm2/drm_agpsupport.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_agpsupport.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,13 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,64 +30,36 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_agpsupport.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_agpsupport.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_agpsupport.c
- * Support code for tying the kernel AGP support to DRM drivers and
- * the DRM's AGP ioctls.
- */
-
 #include <dev/drm2/drmP.h>
 
-#include <dev/agp/agpreg.h>
-#include <dev/pci/pcireg.h>
+#if __OS_HAS_AGP
 
-/* Returns 1 if AGP or 0 if not. */
-static int
-drm_device_find_capability(struct drm_device *dev, int cap)
+/**
+ * Get AGP information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 {
+	DRM_AGP_KERN *kern;
 
-	return (pci_find_cap(dev->device, cap, NULL) == 0);
-}
-
-int drm_device_is_agp(struct drm_device *dev)
-{
-	if (dev->driver->device_is_agp != NULL) {
-		int ret;
-
-		/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
-		 * AGP, 2 = fall back to PCI capability
-		 */
-		ret = (*dev->driver->device_is_agp)(dev);
-		if (ret != DRM_MIGHT_BE_AGP)
-			return ret;
-	}
-
-	return (drm_device_find_capability(dev, PCIY_AGP));
-}
-
-int drm_device_is_pcie(struct drm_device *dev)
-{
-	return (drm_device_find_capability(dev, PCIY_EXPRESS));
-}
-
-int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
-{
-	struct agp_info *kern;
-
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
+		return -EINVAL;
 
-	kern                   = &dev->agp->info;
-	agp_get_info(dev->agp->agpdev, kern);
+	kern = &dev->agp->agp_info;
+	agp_get_info(dev->agp->bridge, kern);
 	info->agp_version_major = 1;
 	info->agp_version_minor = 0;
 	info->mode              = kern->ai_mode;
@@ -93,343 +73,397 @@
 	return 0;
 }
 
+EXPORT_SYMBOL(drm_agp_info);
+
 int drm_agp_info_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
+	struct drm_agp_info *info = data;
 	int err;
-	struct drm_agp_info info;
 
-	err = drm_agp_info(dev, &info);
-	if (err != 0)
+	err = drm_agp_info(dev, info);
+	if (err)
 		return err;
 
-	*(struct drm_agp_info *) data = info;
 	return 0;
 }
 
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
 {
-
-	return drm_agp_acquire(dev);
-}
-
-int drm_agp_acquire(struct drm_device *dev)
-{
 	int retcode;
 
-	if (!dev->agp || dev->agp->acquired)
-		return EINVAL;
-
-	retcode = agp_acquire(dev->agp->agpdev);
+	if (!dev->agp)
+		return -ENODEV;
+	if (dev->agp->acquired)
+		return -EBUSY;
+	retcode = agp_acquire(dev->agp->bridge);
 	if (retcode)
-		return retcode;
-
+		return -retcode;
 	dev->agp->acquired = 1;
 	return 0;
 }
 
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
-
-	return drm_agp_release(dev);
+	return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
 }
 
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
 int drm_agp_release(struct drm_device * dev)
 {
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
-	agp_release(dev->agp->agpdev);
+		return -EINVAL;
+	agp_release(dev->agp->bridge);
 	dev->agp->acquired = 0;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_release);
 
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
 {
+	return drm_agp_release(dev);
+}
 
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
-	
-	dev->agp->mode    = mode.mode;
-	agp_enable(dev->agp->agpdev, mode.mode);
+		return -EINVAL;
+
+	dev->agp->mode = mode.mode;
+	agp_enable(dev->agp->bridge, mode.mode);
 	dev->agp->enabled = 1;
 	return 0;
 }
 
+EXPORT_SYMBOL(drm_agp_enable);
+
 int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	struct drm_agp_mode mode;
+	struct drm_agp_mode *mode = data;
 
-	mode = *(struct drm_agp_mode *) data;
-
-	return drm_agp_enable(dev, mode);
+	return drm_agp_enable(dev, *mode);
 }
 
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
+ */
 int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
-	drm_agp_mem_t    *entry;
-	void	         *handle;
-	unsigned long    pages;
-	u_int32_t	 type;
+	struct drm_agp_mem *entry;
+	DRM_AGP_MEM *memory;
+	unsigned long pages;
+	u32 type;
 	struct agp_memory_info info;
 
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
+		return -EINVAL;
+	if (!(entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT)))
+		return -ENOMEM;
 
-	entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
-	if (entry == NULL)
-		return ENOMEM;
+	memset(entry, 0, sizeof(*entry));
 
 	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
-	type = (u_int32_t) request->type;
-
-	DRM_UNLOCK(dev);
-	handle = drm_agp_allocate_memory(pages, type);
-	DRM_LOCK(dev);
-	if (handle == NULL) {
+	type = (u32) request->type;
+	if (!(memory = agp_alloc_memory(dev->agp->bridge, type, pages << PAGE_SHIFT))) {
 		free(entry, DRM_MEM_AGPLISTS);
-		return ENOMEM;
+		return -ENOMEM;
 	}
-	
-	entry->handle    = handle;
-	entry->bound     = 0;
-	entry->pages     = pages;
-	entry->prev      = NULL;
-	entry->next      = dev->agp->memory;
-	if (dev->agp->memory)
-		dev->agp->memory->prev = entry;
-	dev->agp->memory = entry;
 
-	agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+	entry->handle = (unsigned long)memory;
+	entry->memory = memory;
+	entry->bound = 0;
+	entry->pages = pages;
+	list_add(&entry->head, &dev->agp->memory);
 
-	request->handle   = (unsigned long) entry->handle;
-        request->physical = info.ami_physical;
+	agp_memory_info(dev->agp->bridge, entry->memory, &info);
 
+	request->handle = entry->handle;
+	request->physical = info.ami_physical;
+
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_alloc);
 
+
 int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_agp_buffer request;
-	int retcode;
+	struct drm_agp_buffer *request = data;
 
-	request = *(struct drm_agp_buffer *) data;
-
-	DRM_LOCK(dev);
-	retcode = drm_agp_alloc(dev, &request);
-	DRM_UNLOCK(dev);
-
-	*(struct drm_agp_buffer *) data = request;
-
-	return retcode;
+	return drm_agp_alloc(dev, request);
 }
 
-static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
-					    void *handle)
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+					   unsigned long handle)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 
-	for (entry = dev->agp->memory; entry; entry = entry->next) {
-		if (entry->handle == handle) return entry;
+	list_for_each_entry(entry, &dev->agp->memory, head) {
+		if (entry->handle == handle)
+			return entry;
 	}
 	return NULL;
 }
 
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
 int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 {
-	drm_agp_mem_t     *entry;
-	int retcode;
+	struct drm_agp_mem *entry;
+	int ret;
 
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
-
-	entry = drm_agp_lookup_entry(dev, (void *)request->handle);
-	if (entry == NULL || !entry->bound)
-		return EINVAL;
-
-	DRM_UNLOCK(dev);
-	retcode = drm_agp_unbind_memory(entry->handle);
-	DRM_LOCK(dev);
-
-	if (retcode == 0)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (!entry->bound)
+		return -EINVAL;
+	ret = drm_unbind_agp(entry->memory);
+	if (ret == 0)
 		entry->bound = 0;
-
-	return retcode;
+	return ret;
 }
+EXPORT_SYMBOL(drm_agp_unbind);
 
+
 int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	struct drm_agp_binding request;
-	int retcode;
+	struct drm_agp_binding *request = data;
 
-	request = *(struct drm_agp_binding *) data;
-
-	DRM_LOCK(dev);
-	retcode = drm_agp_unbind(dev, &request);
-	DRM_UNLOCK(dev);
-
-	return retcode;
+	return drm_agp_unbind(dev, request);
 }
 
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks up the AGP memory entry and passes
+ * it to the bind_agp() function.
+ */
 int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 {
-	drm_agp_mem_t     *entry;
-	int               retcode;
-	int               page;
-	
+	struct drm_agp_mem *entry;
+	int retcode;
+	int page;
+
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
-
-	DRM_DEBUG("agp_bind, page_size=%x\n", (int)PAGE_SIZE);
-
-	entry = drm_agp_lookup_entry(dev, (void *)request->handle);
-	if (entry == NULL || entry->bound)
-		return EINVAL;
-
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		return -EINVAL;
 	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	if ((retcode = drm_bind_agp(entry->memory, page)))
+		return retcode;
+	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+	DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+		  dev->agp->base, entry->bound);
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
 
-	DRM_UNLOCK(dev);
-	retcode = drm_agp_bind_memory(entry->handle, page);
-	DRM_LOCK(dev);
-	if (retcode == 0)
-		entry->bound = dev->agp->base + (page << PAGE_SHIFT);
 
-	return retcode;
-}
-
 int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	struct drm_agp_binding request;
-	int retcode;
+	struct drm_agp_binding *request = data;
 
-	request = *(struct drm_agp_binding *) data;
-
-	DRM_LOCK(dev);
-	retcode = drm_agp_bind(dev, &request);
-	DRM_UNLOCK(dev);
-
-	return retcode;
+	return drm_agp_bind(dev, request);
 }
 
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself,
+ * and unlinks it from the doubly linked list it is inserted in.
+ */
 int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 {
-	drm_agp_mem_t    *entry;
-	
+	struct drm_agp_mem *entry;
+
 	if (!dev->agp || !dev->agp->acquired)
-		return EINVAL;
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		drm_unbind_agp(entry->memory);
 
-	entry = drm_agp_lookup_entry(dev, (void*)request->handle);
-	if (entry == NULL)
-		return EINVAL;
-   
-	if (entry->prev)
-		entry->prev->next = entry->next;
-	else
-		dev->agp->memory  = entry->next;
-	if (entry->next)
-		entry->next->prev = entry->prev;
+	list_del(&entry->head);
 
-	DRM_UNLOCK(dev);
-	if (entry->bound)
-		drm_agp_unbind_memory(entry->handle);
-	drm_agp_free_memory(entry->handle);
-	DRM_LOCK(dev);
-
+	drm_free_agp(entry->memory, entry->pages);
 	free(entry, DRM_MEM_AGPLISTS);
-
 	return 0;
-
 }
+EXPORT_SYMBOL(drm_agp_free);
 
+
+
 int drm_agp_free_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	struct drm_agp_buffer request;
-	int retcode;
+	struct drm_agp_buffer *request = data;
 
-	request = *(struct drm_agp_buffer *) data;
-
-	DRM_LOCK(dev);
-	retcode = drm_agp_free(dev, &request);
-	DRM_UNLOCK(dev);
-
-	return retcode;
+	return drm_agp_free(dev, request);
 }
 
-drm_agp_head_t *drm_agp_init(void)
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
-	device_t agpdev;
-	drm_agp_head_t *head   = NULL;
-	int      agp_available = 1;
-   
-	agpdev = DRM_AGP_FIND_DEVICE();
-	if (!agpdev)
-		agp_available = 0;
+	struct drm_agp_head *head = NULL;
 
-	DRM_DEBUG("agp_available = %d\n", agp_available);
-
-	if (agp_available) {
-		head = malloc(sizeof(*head), DRM_MEM_AGPLISTS,
-		    M_NOWAIT | M_ZERO);
-		if (head == NULL)
-			return NULL;
-		head->agpdev = agpdev;
-		agp_get_info(agpdev, &head->info);
-		head->base = head->info.ai_aperture_base;
-		head->memory = NULL;
-		DRM_INFO("AGP at 0x%08lx %dMB\n",
-			 (long)head->info.ai_aperture_base,
-			 (int)(head->info.ai_aperture_size >> 20));
+	if (!(head = malloc(sizeof(*head), DRM_MEM_AGPLISTS, M_NOWAIT)))
+		return NULL;
+	memset((void *)head, 0, sizeof(*head));
+	head->bridge = agp_find_device();
+	if (!head->bridge) {
+		free(head, DRM_MEM_AGPLISTS);
+		return NULL;
+	} else {
+		agp_get_info(head->bridge, &head->agp_info);
 	}
+	INIT_LIST_HEAD(&head->memory);
+	head->cant_use_aperture = 0;
+	head->base = head->agp_info.ai_aperture_base;
 	return head;
 }
 
-void *drm_agp_allocate_memory(size_t pages, u32 type)
+#ifdef FREEBSD_NOTYET
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+		   struct page **pages,
+		   unsigned long num_pages,
+		   uint32_t gtt_offset,
+		   u32 type)
 {
-	device_t agpdev;
+	DRM_AGP_MEM *mem;
+	int ret, i;
 
-	agpdev = DRM_AGP_FIND_DEVICE();
-	if (!agpdev)
+	DRM_DEBUG("\n");
+
+	mem = agp_allocate_memory(dev->agp->bridge, num_pages,
+				      type);
+	if (mem == NULL) {
+		DRM_ERROR("Failed to allocate memory for %ld pages\n",
+			  num_pages);
 		return NULL;
+	}
 
-	return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
-}
+	for (i = 0; i < num_pages; i++)
+		mem->pages[i] = pages[i];
+	mem->page_count = num_pages;
 
-int drm_agp_free_memory(void *handle)
-{
-	device_t agpdev;
+	mem->is_flushed = true;
+	ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	if (ret != 0) {
+		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+		agp_free_memory(mem);
+		return NULL;
+	}
 
-	agpdev = DRM_AGP_FIND_DEVICE();
-	if (!agpdev || !handle)
-		return 0;
-
-	agp_free_memory(agpdev, handle);
-	return 1;
+	return mem;
 }
+EXPORT_SYMBOL(drm_agp_bind_pages);
+#endif /* FREEBSD_NOTYET */
 
-int drm_agp_bind_memory(void *handle, off_t start)
-{
-	device_t agpdev;
-
-	agpdev = DRM_AGP_FIND_DEVICE();
-	if (!agpdev || !handle)
-		return EINVAL;
-
-	return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
-}
-
-int drm_agp_unbind_memory(void *handle)
-{
-	device_t agpdev;
-
-	agpdev = DRM_AGP_FIND_DEVICE();
-	if (!agpdev || !handle)
-		return EINVAL;
-
-	return agp_unbind_memory(agpdev, handle);
-}
+#endif /* __OS_HAS_AGP */

Modified: trunk/sys/dev/drm2/drm_atomic.h
===================================================================
--- trunk/sys/dev/drm2/drm_atomic.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_atomic.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -8,6 +8,7 @@
 
 /*-
  * Copyright 2004 Eric Anholt
+ * Copyright 2013 Jung-uk Kim <jkim at FreeBSD.org>
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,64 +32,63 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_atomic.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_atomic.h 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/* Many of these implementations are rather fake, but good enough. */
+typedef u_int		atomic_t;
+typedef uint64_t	atomic64_t;
 
-typedef u_int32_t atomic_t;
+#define	NB_BITS_PER_LONG		(sizeof(long) * NBBY)
+#define	BITS_TO_LONGS(x)		howmany(x, NB_BITS_PER_LONG)
 
-#define atomic_set(p, v)	(*(p) = (v))
-#define atomic_read(p)		(*(p))
-#define atomic_inc(p)		atomic_add_int(p, 1)
-#define atomic_dec(p)		atomic_subtract_int(p, 1)
-#define atomic_add(n, p)	atomic_add_int(p, n)
-#define atomic_sub(n, p)	atomic_subtract_int(p, n)
+#define	atomic_read(p)			(*(volatile u_int *)(p))
+#define	atomic_set(p, v)		do { *(u_int *)(p) = (v); } while (0)
 
-static __inline atomic_t
-test_and_set_bit(int b, volatile void *p)
-{
-	int s = splhigh();
-	unsigned int m = 1<<b;
-	unsigned int r = *(volatile int *)p & m;
-	*(volatile int *)p |= m;
-	splx(s);
-	return r;
-}
+#define	atomic64_read(p)		atomic_load_acq_64(p)
+#define	atomic64_set(p, v)		atomic_store_rel_64(p, v)
 
-static __inline void
-clear_bit(int b, volatile void *p)
-{
-	atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
-}
+#define	atomic_add(v, p)		atomic_add_int(p, v)
+#define	atomic_sub(v, p)		atomic_subtract_int(p, v)
+#define	atomic_inc(p)			atomic_add(1, p)
+#define	atomic_dec(p)			atomic_sub(1, p)
 
-static __inline void
-set_bit(int b, volatile void *p)
-{
-	atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
-}
+#define	atomic_add_return(v, p)		(atomic_fetchadd_int(p, v) + (v))
+#define	atomic_sub_return(v, p)		(atomic_fetchadd_int(p, -(v)) - (v))
+#define	atomic_inc_return(p)		atomic_add_return(1, p)
+#define	atomic_dec_return(p)		atomic_sub_return(1, p)
 
-static __inline int
-test_bit(int b, volatile void *p)
-{
-	return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
-}
+#define	atomic_add_and_test(v, p)	(atomic_add_return(v, p) == 0)
+#define	atomic_sub_and_test(v, p)	(atomic_sub_return(v, p) == 0)
+#define	atomic_inc_and_test(p)		(atomic_inc_return(p) == 0)
+#define	atomic_dec_and_test(p)		(atomic_dec_return(p) == 0)
 
-static __inline int
-find_first_zero_bit(volatile void *p, int max)
+#define	atomic_xchg(p, v)		atomic_swap_int(p, v)
+#define	atomic64_xchg(p, v)		atomic_swap_64(p, v)
+
+#define	__bit_word(b)			((b) / NB_BITS_PER_LONG)
+#define	__bit_mask(b)			(1UL << (b) % NB_BITS_PER_LONG)
+#define	__bit_addr(p, b)		((volatile u_long *)(p) + __bit_word(b))
+
+#define	clear_bit(b, p) \
+    atomic_clear_long(__bit_addr(p, b), __bit_mask(b))
+#define	set_bit(b, p) \
+    atomic_set_long(__bit_addr(p, b), __bit_mask(b))
+#define	test_bit(b, p) \
+    ((*__bit_addr(p, b) & __bit_mask(b)) != 0)
+#define	test_and_set_bit(b, p) \
+    (atomic_xchg((p), 1) != b)
+#define	cmpxchg(ptr, old, new) \
+    (atomic_cmpset_int((volatile u_int *)(ptr),(old),(new)) ? (old) : (0))
+
+static __inline u_long
+find_first_zero_bit(const u_long *p, u_long max)
 {
-	int b;
-	volatile int *ptr = (volatile int *)p;
+	u_long i, n;
 
-	for (b = 0; b < max; b += 32) {
-		if (ptr[b >> 5] != ~0) {
-			for (;;) {
-				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
-					return b;
-				b++;
-			}
-		}
+	KASSERT(max % NB_BITS_PER_LONG == 0, ("invalid bitmap size %lu", max));
+	for (i = 0; i < max / NB_BITS_PER_LONG; i++) {
+		n = ~p[i];
+		if (n != 0)
+			return (i * NB_BITS_PER_LONG + ffsl(n) - 1);
 	}
-	return max;
+	return (max);
 }
-
-#define	BITS_TO_LONGS(x) (howmany((x), NBBY * sizeof(long)))
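
The rewrite above maps the Linux-style atomics directly onto FreeBSD
primitives, with two details worth noting: the *_return variants are built
on atomic_fetchadd_int() and therefore yield the post-operation value, and
find_first_zero_bit() insists that the bitmap size be a multiple of the
word size. A small self-contained sketch, for illustration only:

static void
atomic_demo(void)
{
	atomic_t refs;
	u_long bitmap[BITS_TO_LONGS(128)] = { 0 };

	atomic_set(&refs, 2);
	(void)atomic_dec_return(&refs);	/* returns 1, the new value */
	if (atomic_dec_and_test(&refs))	/* true once the count reaches 0 */
		printf("last reference dropped\n");

	set_bit(70, bitmap);		/* lands in bitmap[1] on LP64 */
	KASSERT(test_bit(70, bitmap), ("bit 70 not set"));
	/* 128 is a multiple of NB_BITS_PER_LONG, as the KASSERT requires. */
	printf("first free bit: %lu\n", find_first_zero_bit(bitmap, 128));
}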

Modified: trunk/sys/dev/drm2/drm_auth.c
===================================================================
--- trunk/sys/dev/drm2/drm_auth.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_auth.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith at valinux.com
+ *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,121 +32,118 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_auth.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_auth.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_auth.c
- * Implementation of the get/authmagic ioctls implementing the authentication
- * scheme between the master and clients.
- */
-
 #include <dev/drm2/drmP.h>
 
-static int drm_hash_magic(drm_magic_t magic)
-{
-	return magic & (DRM_HASH_SIZE-1);
-}
+static struct mtx drm_magic_lock;
 
 /**
- * Returns the file private associated with the given magic number.
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist, among all files with the same hash key,
+ * for the one with the matching magic number, while holding the
+ * drm_device::struct_mutex lock.
  */
-static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
 {
-	drm_magic_entry_t *pt;
-	int hash = drm_hash_magic(magic);
+	struct drm_file *retval = NULL;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
-	DRM_LOCK_ASSERT(dev);
-
-	for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
-		if (pt->magic == magic) {
-			return pt->priv;
-		}
+	DRM_LOCK(dev);
+	if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+		retval = pt->priv;
 	}
-
-	return NULL;
+	DRM_UNLOCK(dev);
+	return retval;
 }
 
 /**
- * Inserts the given magic number into the hash table of used magic number
- * lists.
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends it to the linked list
+ * associated with the magic number hash key in drm_device::magiclist, while
+ * holding the drm_device::struct_mutex lock.
  */
-static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
 			 drm_magic_t magic)
 {
-	int		  hash;
-	drm_magic_entry_t *entry;
-
+	struct drm_magic_entry *entry;
+	struct drm_device *dev = master->minor->dev;
 	DRM_DEBUG("%d\n", magic);
 
-	DRM_LOCK_ASSERT(dev);
-
-	hash = drm_hash_magic(magic);
 	entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
 	if (!entry)
-		return ENOMEM;
-	entry->magic = magic;
-	entry->priv  = priv;
-	entry->next  = NULL;
+		return -ENOMEM;
+	entry->priv = priv;
+	entry->hash_item.key = (unsigned long)magic;
+	DRM_LOCK(dev);
+	drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+	list_add_tail(&entry->head, &master->magicfree);
+	DRM_UNLOCK(dev);
 
-	if (dev->magiclist[hash].tail) {
-		dev->magiclist[hash].tail->next = entry;
-		dev->magiclist[hash].tail	= entry;
-	} else {
-		dev->magiclist[hash].head	= entry;
-		dev->magiclist[hash].tail	= entry;
-	}
-
 	return 0;
 }
 
 /**
- * Removes the given magic number from the hash table of used magic number
- * lists.
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
-	drm_magic_entry_t *prev = NULL;
-	drm_magic_entry_t *pt;
-	int		  hash;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
-	DRM_LOCK_ASSERT(dev);
-
 	DRM_DEBUG("%d\n", magic);
-	hash = drm_hash_magic(magic);
 
-	for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
-		if (pt->magic == magic) {
-			if (dev->magiclist[hash].head == pt) {
-				dev->magiclist[hash].head = pt->next;
-			}
-			if (dev->magiclist[hash].tail == pt) {
-				dev->magiclist[hash].tail = prev;
-			}
-			if (prev) {
-				prev->next = pt->next;
-			}
-			free(pt, DRM_MEM_MAGIC);
-			return 0;
-		}
+	DRM_LOCK(dev);
+	if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+		DRM_UNLOCK(dev);
+		return -EINVAL;
 	}
+	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+	drm_ht_remove_item(&master->magiclist, hash);
+	list_del(&pt->head);
+	DRM_UNLOCK(dev);
 
-	return EINVAL;
+	free(pt, DRM_MEM_MAGIC);
+
+	return 0;
 }
 
 /**
- * Called by the client, this returns a unique magic number to be authorized
- * by the master.
+ * Get a unique magic number (ioctl).
  *
- * The master may use its own knowledge of the client (such as the X
- * connection that the magic is passed over) to determine if the magic number
- * should be authenticated.
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * search for a unique non-zero magic number and add it, associating it with
+ * \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -147,18 +154,15 @@
 	if (file_priv->magic) {
 		auth->magic = file_priv->magic;
 	} else {
-		DRM_LOCK(dev);
 		do {
-			int old = sequence;
-
-			auth->magic = old+1;
-
-			if (!atomic_cmpset_int(&sequence, old, auth->magic))
-				continue;
-		} while (drm_find_file(dev, auth->magic));
+			mtx_lock(&drm_magic_lock);
+			if (!sequence)
+				++sequence;	/* reserve 0 */
+			auth->magic = sequence++;
+			mtx_unlock(&drm_magic_lock);
+		} while (drm_find_file(file_priv->master, auth->magic));
 		file_priv->magic = auth->magic;
-		drm_add_magic(dev, file_priv, auth->magic);
-		DRM_UNLOCK(dev);
+		drm_add_magic(file_priv->master, file_priv, auth->magic);
 	}
 
 	DRM_DEBUG("%u\n", auth->magic);
@@ -167,25 +171,47 @@
 }
 
 /**
- * Marks the client associated with the given magic number as authenticated.
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \p arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_authmagic(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
 	struct drm_auth *auth = data;
-	struct drm_file *priv;
+	struct drm_file *file;
 
 	DRM_DEBUG("%u\n", auth->magic);
-
-	DRM_LOCK(dev);
-	priv = drm_find_file(dev, auth->magic);
-	if (priv != NULL) {
-		priv->authenticated = 1;
-		drm_remove_magic(dev, auth->magic);
-		DRM_UNLOCK(dev);
+	if ((file = drm_find_file(file_priv->master, auth->magic))) {
+		file->authenticated = 1;
+		drm_remove_magic(file_priv->master, auth->magic);
 		return 0;
-	} else {
-		DRM_UNLOCK(dev);
-		return EINVAL;
 	}
+	return -EINVAL;
 }
+
+static int
+drm_magic_init(void *arg)
+{
+
+	mtx_init(&drm_magic_lock, "drm_getmagic__lock", NULL, MTX_DEF);
+	return (0);
+}
+
+static void
+drm_magic_fini(void *arg)
+{
+
+	mtx_destroy(&drm_magic_lock);
+}
+
+SYSINIT(drm_magic_init, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_init, NULL);
+SYSUNINIT(drm_magic_fini, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_fini, NULL);
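
For context, the other half of this handshake lives in userspace: a DRM
client fetches a magic with DRM_IOCTL_GET_MAGIC, hands it to the master
out of band, and the master blesses it with DRM_IOCTL_AUTH_MAGIC, which is
what sets file_priv->authenticated above. Through libdrm the exchange
looks roughly like this (a sketch; descriptor plumbing and error
reporting elided):

#include <xf86drm.h>

int
authenticate_client(int master_fd, int client_fd)
{
	drm_magic_t magic;

	/* Client side: ask the kernel for a magic (drm_getmagic()). */
	if (drmGetMagic(client_fd, &magic) != 0)
		return (-1);

	/*
	 * The magic travels to the master out of band (e.g. over the
	 * X11 connection); the master then authorizes it, reaching
	 * drm_authmagic() in the kernel.
	 */
	if (drmAuthMagic(master_fd, magic) != 0)
		return (-1);
	return (0);
}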

Added: trunk/sys/dev/drm2/drm_buffer.c
===================================================================
--- trunk/sys/dev/drm2/drm_buffer.c	                        (rev 0)
+++ trunk/sys/dev/drm2/drm_buffer.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,188 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_buffer.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drm_buffer.h>
+
+/**
+ * Allocate the drm buffer object.
+ *
+ *   buf: Pointer to a pointer where the object is stored.
+ *   size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+	int nr_pages = size / PAGE_SIZE + 1;
+	int idx;
+
+	/* Allocating the pointer table at the end of the structure makes
+	 * drm_buffer variable-sized. */
+	*buf = malloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+			DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
+
+	if (*buf == NULL) {
+		DRM_ERROR("Failed to allocate drm buffer object to hold"
+				" %d bytes in %d pages.\n",
+				size, nr_pages);
+		return -ENOMEM;
+	}
+
+	(*buf)->size = size;
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+
+		(*buf)->data[idx] =
+			malloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+				DRM_MEM_DRIVER, M_WAITOK);
+
+
+		if ((*buf)->data[idx] == NULL) {
+			DRM_ERROR("Failed to allocate %dth page for drm"
+					" buffer with %d bytes and %d pages.\n",
+					idx + 1, size, nr_pages);
+			goto error_out;
+		}
+
+	}
+
+	return 0;
+
+error_out:
+
+	/* Only the last element can be a null pointer, so check for it first. */
+	if ((*buf)->data[idx])
+		free((*buf)->data[idx], DRM_MEM_DRIVER);
+
+	for (--idx; idx >= 0; --idx)
+		free((*buf)->data[idx], DRM_MEM_DRIVER);
+
+	free(*buf, DRM_MEM_DRIVER);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ *   user_data: A pointer to the data that is copied to the buffer.
+ *   size: The number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+			      void __user *user_data, int size)
+{
+	int nr_pages = size / PAGE_SIZE + 1;
+	int idx;
+
+	if (size > buf->size) {
+		DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+				" %d bytes space\n",
+				size, buf->size);
+		return -EFAULT;
+	}
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+
+		if (DRM_COPY_FROM_USER(buf->data[idx],
+			(char *)user_data + idx * PAGE_SIZE,
+			min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+			DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+					" (%p) %dth page.\n",
+					user_data, buf, idx);
+			return -EFAULT;
+
+		}
+	}
+	buf->iterator = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+	if (buf != NULL) {
+
+		int nr_pages = buf->size / PAGE_SIZE + 1;
+		int idx;
+		for (idx = 0; idx < nr_pages; ++idx)
+			free(buf->data[idx], DRM_MEM_DRIVER);
+
+		free(buf, DRM_MEM_DRIVER);
+	}
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If
+ * the object is not split, the function simply returns a pointer to the
+ * object inside the buffer. If the object is split, its data is copied into
+ * the stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ *   objsize: The size of the object in bytes.
+ *   stack_obj: A pointer to a memory location where object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+		int objsize, void *stack_obj)
+{
+	int idx = drm_buffer_index(buf);
+	int page = drm_buffer_page(buf);
+	void *obj = NULL;
+
+	if (idx + objsize <= PAGE_SIZE) {
+		obj = &buf->data[page][idx];
+	} else {
+		/* The object is split, which forces a copy into the temporary object. */
+		int beginsz = PAGE_SIZE - idx;
+		memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+		memcpy((char *)stack_obj + beginsz, &buf->data[page + 1][0],
+				objsize - beginsz);
+
+		obj = stack_obj;
+	}
+
+	drm_buffer_advance(buf, objsize);
+	return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
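A minimal sketch of the calling pattern this API is built for, as a driver
might use it to parse a user-supplied command stream (hypothetical code:
cmd_header_t, the opcode dispatch, and the length validation are placeholders,
not part of this commit):

#include <sys/types.h>
#include <dev/drm2/drm_buffer.h>

/* Hypothetical command header; real drivers define their own layout. */
typedef struct {
	uint32_t opcode;
	uint32_t length;	/* payload bytes following the header */
} cmd_header_t;

static int parse_cmd_stream(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	cmd_header_t stack_hdr, *hdr;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret != 0)
		return ret;
	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret != 0)
		goto out;

	while (drm_buffer_unprocessed(buf) >= sizeof(*hdr)) {
		/* Direct pointer if the header fits in one page,
		 * otherwise a copy into stack_hdr. */
		hdr = drm_buffer_read_object(buf, sizeof(*hdr), &stack_hdr);
		/* ... validate hdr->length, dispatch on hdr->opcode ... */
		drm_buffer_advance(buf, hdr->length);
	}
out:
	drm_buffer_free(buf);
	return ret;
}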


Property changes on: trunk/sys/dev/drm2/drm_buffer.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/drm_buffer.h
===================================================================
--- trunk/sys/dev/drm2/drm_buffer.h	                        (rev 0)
+++ trunk/sys/dev/drm2/drm_buffer.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,152 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_buffer.h 254794 2013-08-24 16:14:20Z dumbbell $");
+
+#ifndef _DRM_BUFFER_H_
+#define _DRM_BUFFER_H_
+
+#include <dev/drm2/drmP.h>
+
+struct drm_buffer {
+	int iterator;
+	int size;
+	char *data[];
+};
+
+
+/**
+ * Return the index of the page that the buffer is currently pointing at.
+ */
+static inline int drm_buffer_page(struct drm_buffer *buf)
+{
+	return buf->iterator / PAGE_SIZE;
+}
+/**
+ * Return the index of the current byte in the page
+ */
+static inline int drm_buffer_index(struct drm_buffer *buf)
+{
+	return buf->iterator & (PAGE_SIZE - 1);
+}
+/**
+ * Return the number of bytes that are left to process.
+ */
+static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
+{
+	return buf->size - buf->iterator;
+}
+
+/**
+ * Advance the buffer iterator by the given number of bytes.
+ */
+static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
+{
+	buf->iterator += bytes;
+}
+
+/**
+ * Allocate the drm buffer object.
+ *
+ *   buf: A pointer to a pointer where the object is stored.
+ *   size: The number of bytes to allocate.
+ */
+extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ *   user_data: A pointer to the data that is copied to the buffer.
+ *   size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+		void __user *user_data, int size);
+
+/**
+ * Free the drm buffer object
+ */
+extern void drm_buffer_free(struct drm_buffer *buf);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If
+ * the object is not split, the function simply returns a pointer to the
+ * object inside the buffer. If the object is split, its data is copied into
+ * the stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ *   objsize: The size of the object in bytes.
+ *   stack_obj: A pointer to a memory location where object can be copied.
+ */
+extern void *drm_buffer_read_object(struct drm_buffer *buf,
+		int objsize, void *stack_obj);
+
+/**
+ * Returns a pointer to the dword which is offset dwords from the current
+ * processing location.
+ *
+ * The caller must make sure that the dword is not split in the buffer. This
+ * requirement is easily met if all the sizes of objects in the buffer are
+ * multiples of a dword and PAGE_SIZE is a multiple of a dword.
+ *
+ * A call to this function doesn't change the processing location.
+ *
+ *   offset: The index of the dword relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
+		int offset)
+{
+	int iter = buffer->iterator + offset * 4;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+/**
+ * Returns a pointer to the byte which is offset bytes from the current
+ * processing location.
+ *
+ * A call to this function doesn't change the processing location.
+ *
+ *   offset: The index of the byte relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
+		int offset)
+{
+	int iter = buffer->iterator + offset;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+
+#endif
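The iterator helpers above are plain page-radix arithmetic. A worked example
with PAGE_SIZE = 4096 (illustrative values only, not from this commit):

/*
 * iterator = 9000:
 *   drm_buffer_page()  = 9000 / 4096 = 2    (third page)
 *   drm_buffer_index() = 9000 & 4095 = 808  (byte within that page)
 * so the next byte to process is buf->data[2][808].  An object starting
 * at index 4000 with objsize 200 would end at 4200 > 4096, i.e. it is
 * split across data[n] and data[n + 1] -- exactly the case in which
 * drm_buffer_read_object() copies into the caller's stack object.
 */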


Property changes on: trunk/sys/dev/drm2/drm_buffer.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/dev/drm2/drm_bufs.c
===================================================================
--- trunk/sys/dev/drm2/drm_bufs.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_bufs.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth at valinux.com
+ *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,19 +32,13 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_bufs.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_bufs.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_bufs.c
- * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
- */
+#include <sys/param.h>
+#include <sys/shm.h>
 
 #include <dev/pci/pcireg.h>
 
@@ -49,8 +53,6 @@
 	struct resource *res;
 	int rid;
 
-	DRM_LOCK_ASSERT(dev);
-
 	if (resource >= DRM_MAX_PCI_RESOURCE) {
 		DRM_ERROR("Resource %d too large\n", resource);
 		return 1;
@@ -60,11 +62,9 @@
 		return 0;
 	}
 
-	DRM_UNLOCK(dev);
 	rid = PCIR_BAR(resource);
-	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
+	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
 	    RF_SHAREABLE);
-	DRM_LOCK(dev);
 	if (res == NULL) {
 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
 		return 1;
@@ -81,120 +81,275 @@
 unsigned long drm_get_resource_start(struct drm_device *dev,
 				     unsigned int resource)
 {
+	unsigned long start;
+
+	mtx_lock(&dev->pcir_lock);
+
 	if (drm_alloc_resource(dev, resource) != 0)
 		return 0;
 
-	return rman_get_start(dev->pcir[resource]);
+	start = rman_get_start(dev->pcir[resource]);
+
+	mtx_unlock(&dev->pcir_lock);
+
+	return (start);
 }
 
 unsigned long drm_get_resource_len(struct drm_device *dev,
 				   unsigned int resource)
 {
+	unsigned long len;
+
+	mtx_lock(&dev->pcir_lock);
+
 	if (drm_alloc_resource(dev, resource) != 0)
 		return 0;
 
-	return rman_get_size(dev->pcir[resource]);
+	len = rman_get_size(dev->pcir[resource]);
+
+	mtx_unlock(&dev->pcir_lock);
+
+	return (len);
 }
 
-int drm_addmap(struct drm_device * dev, unsigned long offset,
-	       unsigned long size,
-    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+						  struct drm_local_map *map)
 {
-	drm_local_map_t *map;
+	struct drm_map_list *entry;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		/*
+		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
+		 * while PCI resources may live above that, we only compare the
+		 * lower 32 bits of the map offset for maps of type
+		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
+		 * It is assumed that if a driver has more than one resource
+		 * of each type, the lower 32 bits are different.
+		 */
+		if (!entry->map ||
+		    map->type != entry->map->type ||
+		    entry->master != dev->primary->master)
+			continue;
+		switch (map->type) {
+		case _DRM_SHM:
+			if (map->flags != _DRM_CONTAINS_LOCK)
+				break;
+			return entry;
+		case _DRM_REGISTERS:
+		case _DRM_FRAME_BUFFER:
+			if ((entry->map->offset & 0xffffffff) ==
+			    (map->offset & 0xffffffff))
+				return entry;
+		default: /* Make gcc happy */
+			;
+		}
+		if (entry->map->offset == map->offset)
+			return entry;
+	}
+
+	return NULL;
+}
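To make the truncated comparison concrete: on a 64-bit machine, register BARs
at 0x1e0000000 and 0x2e0000000 share their low 32 bits (illustrative
addresses, not from this commit):

/*
 * (0x1e0000000UL & 0xffffffff) == (0x2e0000000UL & 0xffffffff)
 *                              == 0xe0000000
 * so drm_find_matching_map() would treat an addmap of the second BAR as
 * a duplicate of the first -- hence the assumption that drivers keep the
 * lower 32 bits distinct per map type.
 */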
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+			  unsigned long user_token, int hashed_handle, int shm)
+{
+	int use_hashed_handle, shift;
+	unsigned long add;
+
+#if (BITS_PER_LONG == 64)
+	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+	use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+	if (!use_hashed_handle) {
+		int ret;
+		hash->key = user_token >> PAGE_SHIFT;
+		ret = drm_ht_insert_item(&dev->map_hash, hash);
+		if (ret != -EINVAL)
+			return ret;
+	}
+
+	shift = 0;
+	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+	if (shm && (SHMLBA > PAGE_SIZE)) {
+		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+		/* For shared memory, we have to preserve the SHMLBA
+		 * bits of the eventual vma->vm_pgoff value during
+		 * mmap().  Otherwise we run into cache aliasing problems
+		 * on some platforms.  On these platforms, the pgoff of
+		 * a mmap() request is used to pick a suitable virtual
+		 * address for the mmap() region such that it will not
+		 * cause cache aliasing problems.
+		 *
+		 * Therefore, make sure the SHMLBA relevant bits of the
+		 * hash value we use are equal to those in the original
+		 * kernel virtual address.
+		 */
+		shift = bits;
+		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+	}
+
+	return drm_ht_just_insert_please(&dev->map_hash, hash,
+					 user_token, 32 - PAGE_SHIFT - 3,
+					 shift, add);
+}
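A worked example of the SHMLBA branch above, assuming PAGE_SIZE = 4096
(PAGE_SHIFT = 12) and a hypothetical SHMLBA of 16384:

/*
 * bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3
 * add |= (user_token >> PAGE_SHIFT) & 7
 *
 * The low 3 page-number bits of the kernel virtual address are folded
 * into 'add'; the hashed part of the key is shifted left past them, so
 * the candidate keys differ only in bits at or above 'shift' and the
 * user token mmap()s at the same cache color as the kernel mapping.
 */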
+
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+			   unsigned int size, enum drm_map_type type,
+			   enum drm_map_flags flags,
+			   struct drm_map_list ** maplist)
+{
+	struct drm_local_map *map;
+	struct drm_map_list *list;
+	drm_dma_handle_t *dmah;
+	unsigned long user_token;
+	int ret;
 	int align;
-	/*drm_agp_mem_t *entry;
-	int valid;*/
 
+	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_NOWAIT);
+	if (!map)
+		return -ENOMEM;
+
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
+
 	/* Only allow shared memory to be removable since we only keep enough
 	 * book keeping information about shared memory to allow for removal
 	 * when processes fork.
 	 */
-	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
-		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
-		return EINVAL;
+	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+		free(map, DRM_MEM_MAPS);
+		return -EINVAL;
 	}
-	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
-		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
-		    offset, size);
-		return EINVAL;
-	}
-	if (offset + size < offset) {
-		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
-		    offset, size);
-		return EINVAL;
-	}
+	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+		  (unsigned long long)map->offset, map->size, map->type);
 
-	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
-	    size, type);
-
-	/* Check if this is just another version of a kernel-allocated map, and
-	 * just hand that back if so.
+	/* page-align _DRM_SHM maps. They are allocated here so there is no security
+	 * hole created by that and it works around various broken drivers that use
+	 * a non-aligned quantity to map the SAREA. --BenH
 	 */
-	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
-	    type == _DRM_SHM) {
-		TAILQ_FOREACH(map, &dev->maplist, link) {
-			if (map->type == type && (map->offset == offset ||
-			    (map->type == _DRM_SHM &&
-			    map->flags == _DRM_CONTAINS_LOCK))) {
-				map->size = size;
-				DRM_DEBUG("Found kernel map %d\n", type);
-				goto done;
-			}
-		}
-	}
-	DRM_UNLOCK(dev);
+	if (map->type == _DRM_SHM)
+		map->size = PAGE_ALIGN(map->size);
 
-	/* Allocate a new map structure, fill it in, and do any type-specific
-	 * initialization necessary.
+	/*
+	 * FreeBSD port note: FreeBSD's PAGE_MASK is the inverse of
+	 * Linux's one. That's why the test below doesn't inverse the
+	 * constant.
 	 */
-	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
-	if (!map) {
-		DRM_LOCK(dev);
-		return ENOMEM;
+	if ((map->offset & ((resource_size_t)PAGE_MASK)) || (map->size & (PAGE_MASK))) {
+		free(map, DRM_MEM_MAPS);
+		return -EINVAL;
 	}
+	map->mtrr = -1;
+	map->handle = NULL;
 
-	map->offset = offset;
-	map->size = size;
-	map->type = type;
-	map->flags = flags;
-	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
-	    DRM_MAP_HANDLE_SHIFT);
-
 	switch (map->type) {
 	case _DRM_REGISTERS:
-		map->virtual = drm_ioremap(dev, map);
-		if (!(map->flags & _DRM_WRITE_COMBINING))
-			break;
-		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
-			map->mtrr = 1;
+#ifdef __linux__
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
+		if (map->offset + (map->size-1) < map->offset ||
+		    map->offset < virt_to_phys(high_memory)) {
+			kfree(map);
+			return -EINVAL;
+		}
+#endif
+#endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if (list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size,
+					  list->map->size);
+				list->map->size = map->size;
+			}
+
+			free(map, DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
+
+		if (drm_core_has_MTRR(dev)) {
+			if (map->type == _DRM_FRAME_BUFFER ||
+			    (map->flags & _DRM_WRITE_COMBINING)) {
+				if (drm_mtrr_add(
+				    map->offset, map->size,
+				    DRM_MTRR_WC) == 0)
+					map->mtrr = 1;
+			}
+		}
+		if (map->type == _DRM_REGISTERS) {
+			drm_core_ioremap(map, dev);
+			if (!map->handle) {
+				free(map, DRM_MEM_MAPS);
+				return -ENOMEM;
+			}
+		}
+
 		break;
 	case _DRM_SHM:
-		map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if(list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, list->map->size);
+				list->map->size = map->size;
+			}
+
+			free(map, DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
+		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
 		DRM_DEBUG("%lu %d %p\n",
-		    map->size, drm_order(map->size), map->virtual);
-		if (!map->virtual) {
+			  map->size, drm_order(map->size), map->handle);
+		if (!map->handle) {
 			free(map, DRM_MEM_MAPS);
-			DRM_LOCK(dev);
-			return ENOMEM;
+			return -ENOMEM;
 		}
-		map->offset = (unsigned long)map->virtual;
+		map->offset = (unsigned long)map->handle;
 		if (map->flags & _DRM_CONTAINS_LOCK) {
 			/* Prevent a 2nd X Server from creating a 2nd lock */
-			DRM_LOCK(dev);
-			if (dev->lock.hw_lock != NULL) {
-				DRM_UNLOCK(dev);
-				free(map->virtual, DRM_MEM_MAPS);
+			if (dev->primary->master->lock.hw_lock != NULL) {
+				free(map->handle, DRM_MEM_MAPS);
 				free(map, DRM_MEM_MAPS);
-				return EBUSY;
+				return -EBUSY;
 			}
-			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
-			DRM_UNLOCK(dev);
+			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
 		}
 		break;
-	case _DRM_AGP:
-		/*valid = 0;*/
+	case _DRM_AGP: {
+		struct drm_agp_mem *entry;
+		int valid = 0;
+
+		if (!drm_core_has_AGP(dev)) {
+			free(map, DRM_MEM_MAPS);
+			return -EINVAL;
+		}
+#ifdef __linux__
+#ifdef __alpha__
+		map->offset += dev->hose->mem_space->start;
+#endif
+#endif
 		/* In some cases (i810 driver), user space may have already
 		 * added the AGP base itself, because dev->agp->base previously
 		 * only got set during AGP enable.  So, only add the base
@@ -203,128 +358,202 @@
 		 */
 		if (map->offset < dev->agp->base ||
 		    map->offset > dev->agp->base +
-		    dev->agp->info.ai_aperture_size - 1) {
+		    dev->agp->agp_info.ai_aperture_size * 1024 * 1024 - 1) {
 			map->offset += dev->agp->base;
 		}
-		map->mtrr   = dev->agp->mtrr; /* for getmap */
-		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
+		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
+
+		/* This assumes the DRM is in total control of AGP space.
+		 * It's not always the case as AGP can be in the control
+		 * of user space (i.e. i810 driver). So this loop will get
+		 * skipped and we double check that dev->agp->memory is
+		 * actually set as well as being invalid before EPERM'ing
+		 */
+		list_for_each_entry(entry, &dev->agp->memory, head) {
 			if ((map->offset >= entry->bound) &&
-			    (map->offset + map->size <=
-			    entry->bound + entry->pages * PAGE_SIZE)) {
+			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 				valid = 1;
 				break;
 			}
 		}
-		if (!valid) {
+		if (!list_empty(&dev->agp->memory) && !valid) {
 			free(map, DRM_MEM_MAPS);
-			DRM_LOCK(dev);
-			return EACCES;
-		}*/
+			return -EPERM;
+		}
+		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
+			  (unsigned long long)map->offset, map->size);
+
 		break;
+	}
+	case _DRM_GEM:
+		DRM_ERROR("tried to addmap GEM object\n");
+		break;
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			free(map, DRM_MEM_MAPS);
-			DRM_LOCK(dev);
-			return EINVAL;
+			return -EINVAL;
 		}
-		map->virtual = (void *)(dev->sg->vaddr + offset);
-		map->offset = dev->sg->vaddr + offset;
+		map->handle = (void *)(dev->sg->vaddr + offset);
+		map->offset += dev->sg->vaddr;
 		break;
 	case _DRM_CONSISTENT:
-		/* Unfortunately, we don't get any alignment specification from
-		 * the caller, so we have to guess.  drm_pci_alloc requires
-		 * a power-of-two alignment, so try to align the bus address of
-		 * the map to it size if possible, otherwise just assume
-		 * PAGE_SIZE alignment.
-		 */
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
 		align = map->size;
 		if ((align & (align - 1)) != 0)
 			align = PAGE_SIZE;
-		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
-		if (map->dmah == NULL) {
+		dmah = drm_pci_alloc(dev, map->size, align, BUS_SPACE_MAXADDR);
+		if (!dmah) {
 			free(map, DRM_MEM_MAPS);
-			DRM_LOCK(dev);
-			return ENOMEM;
+			return -ENOMEM;
 		}
-		map->virtual = map->dmah->vaddr;
-		map->offset = map->dmah->busaddr;
+		map->handle = dmah->vaddr;
+		map->offset = dmah->busaddr;
+		map->dmah = dmah;
 		break;
 	default:
-		DRM_ERROR("Bad map type %d\n", map->type);
 		free(map, DRM_MEM_MAPS);
-		DRM_LOCK(dev);
-		return EINVAL;
+		return -EINVAL;
 	}
 
+	list = malloc(sizeof(*list), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
+	if (!list) {
+		if (map->type == _DRM_REGISTERS)
+			drm_core_ioremapfree(map, dev);
+		free(map, DRM_MEM_MAPS);
+		return -EINVAL;
+	}
+	list->map = map;
+
 	DRM_LOCK(dev);
-	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
+	list_add(&list->head, &dev->maplist);
 
-done:
-	/* Jumped to, with lock held, when a kernel map is found. */
+	/* Assign a 32-bit handle */
+	/* We do it here so that dev->struct_mutex protects the increment */
+	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+		map->offset;
+	ret = drm_map_handle(dev, &list->hash, user_token, 0,
+			     (map->type == _DRM_SHM));
+	if (ret) {
+		if (map->type == _DRM_REGISTERS)
+			drm_core_ioremapfree(map, dev);
+		free(map, DRM_MEM_MAPS);
+		free(list, DRM_MEM_MAPS);
+		DRM_UNLOCK(dev);
+		return ret;
+	}
 
-	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
-	    map->size);
+	list->user_token = list->hash.key << PAGE_SHIFT;
+	DRM_UNLOCK(dev);
 
-	*map_ptr = map;
+	if (!(map->flags & _DRM_DRIVER))
+		list->master = dev->primary->master;
+	*maplist = list;
+	return 0;
+	}
 
-	return 0;
+int drm_addmap(struct drm_device * dev, resource_size_t offset,
+	       unsigned int size, enum drm_map_type type,
+	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+{
+	struct drm_map_list *list;
+	int rc;
+
+	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+	if (!rc)
+		*map_ptr = list->map;
+	return rc;
 }
 
+EXPORT_SYMBOL(drm_addmap);
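For reference, the in-kernel calling convention after this change, as a driver
might use it to map its register BAR (a hedged sketch; the BAR index and size
are placeholders):

static int example_map_registers(struct drm_device *dev)
{
	struct drm_local_map *map;
	int ret;

	/* Hypothetical 64 KiB register BAR 0. */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 0), 0x10000,
	    _DRM_REGISTERS, 0, &map);
	if (ret != 0)
		return ret;	/* now a negative errno */
	/* map->handle holds the ioremapped kernel virtual address. */
	return (0);
}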
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
 int drm_addmap_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
-	struct drm_map *request = data;
-	drm_local_map_t *map;
+	struct drm_map *map = data;
+	struct drm_map_list *maplist;
 	int err;
 
-	if (!(dev->flags & (FREAD|FWRITE)))
-		return EACCES; /* Require read/write */
+	if (!(DRM_SUSER(DRM_CURPROC) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+		return -EPERM;
 
-	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
-		return EACCES;
+	err = drm_addmap_core(dev, map->offset, map->size, map->type,
+			      map->flags, &maplist);
 
-	DRM_LOCK(dev);
-	err = drm_addmap(dev, request->offset, request->size, request->type,
-	    request->flags, &map);
-	DRM_UNLOCK(dev);
-	if (err != 0)
+	if (err)
 		return err;
 
-	request->offset = map->offset;
-	request->size = map->size;
-	request->type = map->type;
-	request->flags = map->flags;
-	request->mtrr   = map->mtrr;
-	request->handle = (void *)map->handle;
-
+	/* avoid a warning on 64-bit; this cast isn't very nice, but the API was set long ago, so it is too late to change */
+	map->handle = (void *)(unsigned long)maplist->user_token;
 	return 0;
 }
 
-void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
+/**
+ * Remove a map private from the list and deallocate resources if the
+ * mapping isn't in use.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list,
+ * checks whether it is still in use, and frees any associated resources
+ * (such as MTRRs) if it is not.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 {
-	DRM_LOCK_ASSERT(dev);
+	struct drm_map_list *r_list = NULL, *list_t;
+	int found = 0;
+	struct drm_master *master;
 
-	if (map == NULL)
-		return;
+	/* Find the list entry for the map and remove it */
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+		if (r_list->map == map) {
+			master = r_list->master;
+			list_del(&r_list->head);
+			drm_ht_remove_key(&dev->map_hash,
+					  r_list->user_token >> PAGE_SHIFT);
+			free(r_list, DRM_MEM_MAPS);
+			found = 1;
+			break;
+		}
+	}
 
-	TAILQ_REMOVE(&dev->maplist, map, link);
+	if (!found)
+		return -EINVAL;
 
 	switch (map->type) {
 	case _DRM_REGISTERS:
-		if (map->bsr == NULL)
-			drm_ioremapfree(map);
+		drm_core_ioremapfree(map, dev);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (map->mtrr) {
-			int __unused retcode;
-			
-			retcode = drm_mtrr_del(0, map->offset, map->size,
-			    DRM_MTRR_WC);
-			DRM_DEBUG("mtrr_del = %d\n", retcode);
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = drm_mtrr_del(map->mtrr, map->offset,
+			    map->size, DRM_MTRR_WC);
+			DRM_DEBUG("mtrr_del=%d\n", retcode);
 		}
 		break;
 	case _DRM_SHM:
-		free(map->virtual, DRM_MEM_MAPS);
+		free(map->handle, DRM_MEM_MAPS);
+		if (master) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;   /* SHM removed */
+			master->lock.file_priv = NULL;
+			DRM_WAKEUP_INT((void *)&master->lock.lock_queue);
+		}
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
@@ -332,64 +561,100 @@
 	case _DRM_CONSISTENT:
 		drm_pci_free(dev, map->dmah);
 		break;
-	default:
-		DRM_ERROR("Bad map type %d\n", map->type);
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
 		break;
 	}
+	free(map, DRM_MEM_MAPS);
 
-	if (map->bsr != NULL) {
-		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
-		    map->bsr);
-	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
 
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+	int ret;
+
+	DRM_LOCK(dev);
+	ret = drm_rmmap_locked(dev, map);
 	DRM_UNLOCK(dev);
-	if (map->handle)
-		free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
-		    DRM_MAP_HANDLE_SHIFT);
-	DRM_LOCK(dev);
 
-	free(map, DRM_MEM_MAPS);
+	return ret;
 }
+EXPORT_SYMBOL(drm_rmmap);
 
-/* Remove a map private from list and deallocate resources if the mapping
- * isn't in use.
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
  */
-
 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
-	drm_local_map_t *map;
 	struct drm_map *request = data;
+	struct drm_local_map *map = NULL;
+	struct drm_map_list *r_list;
+	int ret;
 
 	DRM_LOCK(dev);
-	TAILQ_FOREACH(map, &dev->maplist, link) {
-		if (map->handle == request->handle &&
-		    map->flags & _DRM_REMOVABLE)
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long)request->handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
 			break;
+		}
 	}
 
-	/* No match found. */
-	if (map == NULL) {
+	/* The list has wrapped around to the head pointer, or it is empty
+	 * and we didn't find anything.
+	 */
+	if (list_empty(&dev->maplist) || !map) {
 		DRM_UNLOCK(dev);
-		return EINVAL;
+		return -EINVAL;
 	}
 
-	drm_rmmap(dev, map);
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		DRM_UNLOCK(dev);
+		return 0;
+	}
 
+	ret = drm_rmmap_locked(dev, map);
+
 	DRM_UNLOCK(dev);
 
-	return 0;
+	return ret;
 }
 
-
-static void drm_cleanup_buf_error(struct drm_device *dev,
-				  drm_buf_entry_t *entry)
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+				  struct drm_buf_entry * entry)
 {
 	int i;
 
 	if (entry->seg_count) {
 		for (i = 0; i < entry->seg_count; i++) {
-			drm_pci_free(dev, entry->seglist[i]);
+			if (entry->seglist[i]) {
+				drm_pci_free(dev, entry->seglist[i]);
+			}
 		}
 		free(entry->seglist, DRM_MEM_SEGS);
 
@@ -396,8 +661,8 @@
 		entry->seg_count = 0;
 	}
 
-   	if (entry->buf_count) {
-	   	for (i = 0; i < entry->buf_count; i++) {
+	if (entry->buf_count) {
+		for (i = 0; i < entry->buf_count; i++) {
 			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
 		}
 		free(entry->buflist, DRM_MEM_BUFS);
@@ -406,13 +671,24 @@
 	}
 }
 
-static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks, creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_entry_t *entry;
-	/*drm_agp_mem_t *agp_entry;
-	int valid*/
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_agp_mem *agp_entry;
+	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
@@ -422,15 +698,18 @@
 	int page_order;
 	int total;
 	int byte_count;
-	int i;
-	drm_buf_t **temp_buflist;
+	int i, valid;
+	struct drm_buf **temp_buflist;
 
+	if (!dma)
+		return -EINVAL;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment  = (request->flags & _DRM_PAGE_ALIGN)
-	    ? round_page(size) : size;
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
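/*
 * Worked sizing example (illustrative values, not part of the diff): for
 * request->size = 12288, drm_order() rounds up to the next power of two,
 * so order = 14 and size = 1 << 14 = 16384.  With _DRM_PAGE_ALIGN set,
 * alignment = PAGE_ALIGN(16384) = 16384; on 4 KiB pages page_order =
 * 14 - 12 = 2, and total = PAGE_SIZE << 2 = 16384 bytes per allocation.
 */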
 
@@ -437,40 +716,58 @@
 	byte_count = 0;
 	agp_offset = dev->agp->base + request->agp_start;
 
-	DRM_DEBUG("count:      %d\n",  count);
-	DRM_DEBUG("order:      %d\n",  order);
-	DRM_DEBUG("size:       %d\n",  size);
-	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
-	DRM_DEBUG("alignment:  %d\n",  alignment);
-	DRM_DEBUG("page_order: %d\n",  page_order);
-	DRM_DEBUG("total:      %d\n",  total);
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
 
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
 	/* Make sure buffers are located in AGP memory that we own */
-	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
-	 * memory.  Safe to ignore for now because these ioctls are still
-	 * root-only.
-	 */
-	/*valid = 0;
-	for (agp_entry = dev->agp->memory; agp_entry;
-	    agp_entry = agp_entry->next) {
+	valid = 0;
+	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 		if ((agp_offset >= agp_entry->bound) &&
-		    (agp_offset + total * count <=
-		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 			valid = 1;
 			break;
 		}
 	}
-	if (!valid) {
+	if (!list_empty(&dev->agp->memory) && !valid) {
 		DRM_DEBUG("zone invalid\n");
-		return EINVAL;
-	}*/
+		return -EINVAL;
+	}
+	mtx_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	mtx_unlock(&dev->count_lock);
 
+	DRM_LOCK(dev);
 	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
 
+	if (count < 0 || count > 4096) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
 	if (!entry->buflist) {
-		return ENOMEM;
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
 
 	entry->buf_size = size;
@@ -479,29 +776,34 @@
 	offset = 0;
 
 	while (entry->buf_count < count) {
-		buf          = &entry->buflist[entry->buf_count];
-		buf->idx     = dma->buf_count + entry->buf_count;
-		buf->total   = alignment;
-		buf->order   = order;
-		buf->used    = 0;
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
 
-		buf->offset  = (dma->byte_count + offset);
+		buf->offset = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
 		buf->address = (void *)(agp_offset + offset);
-		buf->next    = NULL;
+		buf->next = NULL;
+		buf->waiting = 0;
 		buf->pending = 0;
 		buf->file_priv = NULL;
 
-		buf->dev_priv_size = dev->driver->buf_priv_size;
+		buf->dev_priv_size = dev->driver->dev_priv_size;
 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 		    M_NOWAIT | M_ZERO);
-		if (buf->dev_private == NULL) {
+		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			return ENOMEM;
+			DRM_UNLOCK(dev);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
 		}
 
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
 		offset += alignment;
 		entry->buf_count++;
 		byte_count += PAGE_SIZE << page_order;
@@ -512,10 +814,12 @@
 	temp_buflist = realloc(dma->buflist,
 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 	    DRM_MEM_BUFS, M_NOWAIT);
-	if (temp_buflist == NULL) {
+	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		return ENOMEM;
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
 	dma->buflist = temp_buflist;
 
@@ -524,29 +828,37 @@
 	}
 
 	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
+	DRM_UNLOCK(dev);
+
 	request->count = entry->buf_count;
 	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_AGP;
 
+	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif				/* __OS_HAS_AGP */
 
-static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int count;
 	int order;
 	int size;
 	int total;
 	int page_order;
-	drm_buf_entry_t *entry;
-	drm_buf_t *buf;
+	struct drm_buf_entry *entry;
+	drm_dma_handle_t *dmah;
+	struct drm_buf *buf;
 	int alignment;
 	unsigned long offset;
 	int i;
@@ -553,26 +865,70 @@
 	int byte_count;
 	int page_count;
 	unsigned long *temp_pagelist;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
+	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
-	    request->count, request->size, size, order);
+		  request->count, request->size, size, order);
 
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
 	alignment = (request->flags & _DRM_PAGE_ALIGN)
-	    ? round_page(size) : size;
+	    ? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
+	mtx_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	mtx_unlock(&dev->count_lock);
+
+	DRM_LOCK(dev);
 	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
 
+	if (count < 0 || count > 4096) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
+	if (!entry->buflist) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
 	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
 	    M_NOWAIT | M_ZERO);
+	if (!entry->seglist) {
+		free(entry->buflist, DRM_MEM_BUFS);
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
 
 	/* Keep the original pagelist until we know all the allocations
 	 * have succeeded
@@ -579,77 +935,77 @@
 	 */
 	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
 	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
-
-	if (entry->buflist == NULL || entry->seglist == NULL || 
-	    temp_pagelist == NULL) {
-		free(temp_pagelist, DRM_MEM_PAGES);
+	if (!temp_pagelist) {
+		free(entry->buflist, DRM_MEM_BUFS);
 		free(entry->seglist, DRM_MEM_SEGS);
-		free(entry->buflist, DRM_MEM_BUFS);
-		return ENOMEM;
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
-
-	memcpy(temp_pagelist, dma->pagelist, dma->page_count * 
-	    sizeof(*dma->pagelist));
-
+	memcpy(temp_pagelist,
+	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
 	DRM_DEBUG("pagelist: %d entries\n",
-	    dma->page_count + (count << page_order));
+		  dma->page_count + (count << page_order));
 
-	entry->buf_size	= size;
+	entry->buf_size = size;
 	entry->page_order = page_order;
 	byte_count = 0;
 	page_count = 0;
 
 	while (entry->buf_count < count) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
-		    0xfffffffful);
-		DRM_SPINLOCK(&dev->dma_lock);
-		if (dmah == NULL) {
+
+		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, BUS_SPACE_MAXADDR);
+
+		if (!dmah) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			entry->seg_count = count;
 			drm_cleanup_buf_error(dev, entry);
 			free(temp_pagelist, DRM_MEM_PAGES);
-			return ENOMEM;
+			DRM_UNLOCK(dev);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
 		}
-
 		entry->seglist[entry->seg_count++] = dmah;
 		for (i = 0; i < (1 << page_order); i++) {
-			DRM_DEBUG("page %d @ %p\n",
-			    dma->page_count + page_count,
-			    (char *)dmah->vaddr + PAGE_SIZE * i);
-			temp_pagelist[dma->page_count + page_count++] = 
-			    (long)dmah->vaddr + PAGE_SIZE * i;
+			DRM_DEBUG("page %d @ 0x%08lx\n",
+				  dma->page_count + page_count,
+				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+			temp_pagelist[dma->page_count + page_count++]
+				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
 		}
 		for (offset = 0;
-		    offset + size <= total && entry->buf_count < count;
-		    offset += alignment, ++entry->buf_count) {
-			buf	     = &entry->buflist[entry->buf_count];
-			buf->idx     = dma->buf_count + entry->buf_count;
-			buf->total   = alignment;
-			buf->order   = order;
-			buf->used    = 0;
-			buf->offset  = (dma->byte_count + byte_count + offset);
-			buf->address = ((char *)dmah->vaddr + offset);
+		     offset + size <= total && entry->buf_count < count;
+		     offset += alignment, ++entry->buf_count) {
+			buf = &entry->buflist[entry->buf_count];
+			buf->idx = dma->buf_count + entry->buf_count;
+			buf->total = alignment;
+			buf->order = order;
+			buf->used = 0;
+			buf->offset = (dma->byte_count + byte_count + offset);
+			buf->address = (void *)((char *)dmah->vaddr + offset);
 			buf->bus_address = dmah->busaddr + offset;
-			buf->next    = NULL;
+			buf->next = NULL;
+			buf->waiting = 0;
 			buf->pending = 0;
 			buf->file_priv = NULL;
 
-			buf->dev_priv_size = dev->driver->buf_priv_size;
+			buf->dev_priv_size = dev->driver->dev_priv_size;
 			buf->dev_private = malloc(buf->dev_priv_size,
 			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
-			if (buf->dev_private == NULL) {
+			if (!buf->dev_private) {
 				/* Set count correctly so we free the proper amount. */
 				entry->buf_count = count;
 				entry->seg_count = count;
 				drm_cleanup_buf_error(dev, entry);
 				free(temp_pagelist, DRM_MEM_PAGES);
-				return ENOMEM;
+				DRM_UNLOCK(dev);
+				atomic_dec(&dev->buf_alloc);
+				return -ENOMEM;
 			}
 
 			DRM_DEBUG("buffer %d @ %p\n",
-			    entry->buf_count, buf->address);
+				  entry->buf_count, buf->address);
 		}
 		byte_count += PAGE_SIZE << page_order;
 	}
@@ -657,11 +1013,13 @@
 	temp_buflist = realloc(dma->buflist,
 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 	    DRM_MEM_BUFS, M_NOWAIT);
-	if (temp_buflist == NULL) {
+	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
 		free(temp_pagelist, DRM_MEM_PAGES);
-		return ENOMEM;
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
 	dma->buflist = temp_buflist;
 
@@ -669,10 +1027,12 @@
 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 	}
 
-	/* No allocations failed, so now we can replace the orginal pagelist
+	/* No allocations failed, so now we can replace the original pagelist
 	 * with the new one.
 	 */
-	free(dma->pagelist, DRM_MEM_PAGES);
+	if (dma->page_count) {
+		free(dma->pagelist, DRM_MEM_PAGES);
+	}
 	dma->pagelist = temp_pagelist;
 
 	dma->buf_count += entry->buf_count;
@@ -680,18 +1040,25 @@
 	dma->page_count += entry->seg_count << page_order;
 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
+	DRM_UNLOCK(dev);
+
 	request->count = entry->buf_count;
 	request->size = size;
 
+	if (request->flags & _DRM_PCI_BUFFER_RO)
+		dma->flags = _DRM_DMA_USE_PCI_RO;
+
+	atomic_dec(&dev->buf_alloc);
 	return 0;
 
 }
+EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_entry_t *entry;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
@@ -702,14 +1069,23 @@
 	int total;
 	int byte_count;
 	int i;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment  = (request->flags & _DRM_PAGE_ALIGN)
-	    ? round_page(size) : size;
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
@@ -716,20 +1092,46 @@
 	byte_count = 0;
 	agp_offset = request->agp_start;
 
-	DRM_DEBUG("count:      %d\n",  count);
-	DRM_DEBUG("order:      %d\n",  order);
-	DRM_DEBUG("size:       %d\n",  size);
-	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
-	DRM_DEBUG("alignment:  %d\n",  alignment);
-	DRM_DEBUG("page_order: %d\n",  page_order);
-	DRM_DEBUG("total:      %d\n",  total);
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
 
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
+	mtx_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	mtx_unlock(&dev->count_lock);
+
+	DRM_LOCK(dev);
 	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
 
+	if (count < 0 || count > 4096) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
-	if (entry->buflist == NULL)
-		return ENOMEM;
+	if (!entry->buflist) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
 
 	entry->buf_size = size;
 	entry->page_order = page_order;
@@ -737,31 +1139,34 @@
 	offset = 0;
 
 	while (entry->buf_count < count) {
-		buf          = &entry->buflist[entry->buf_count];
-		buf->idx     = dma->buf_count + entry->buf_count;
-		buf->total   = alignment;
-		buf->order   = order;
-		buf->used    = 0;
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
 
-		buf->offset  = (dma->byte_count + offset);
+		buf->offset = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
-		buf->next    = NULL;
+		buf->address = (void *)(agp_offset + offset
+					+ (unsigned long)dev->sg->vaddr);
+		buf->next = NULL;
+		buf->waiting = 0;
 		buf->pending = 0;
 		buf->file_priv = NULL;
 
-		buf->dev_priv_size = dev->driver->buf_priv_size;
+		buf->dev_priv_size = dev->driver->dev_priv_size;
 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 		    M_NOWAIT | M_ZERO);
-		if (buf->dev_private == NULL) {
+		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			return ENOMEM;
+			DRM_UNLOCK(dev);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
 		}
 
-		DRM_DEBUG("buffer %d @ %p\n",
-		    entry->buf_count, buf->address);
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 
 		offset += alignment;
 		entry->buf_count++;
@@ -773,10 +1178,12 @@
 	temp_buflist = realloc(dma->buflist,
 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 	    DRM_MEM_BUFS, M_NOWAIT);
-	if (temp_buflist == NULL) {
+	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		return ENOMEM;
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
 	dma->buflist = temp_buflist;
 
@@ -785,144 +1192,256 @@
 	}
 
 	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
+	DRM_UNLOCK(dev);
+
 	request->count = entry->buf_count;
 	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_SG;
 
+	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
 
-int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	int order, ret;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
 
-	if (request->count < 0 || request->count > 4096)
-		return EINVAL;
-	
-	order = drm_order(request->size);
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
 
-	DRM_SPINLOCK(&dev->dma_lock);
+	if (!dma)
+		return -EINVAL;
 
-	/* No more allocations after first buffer-using ioctl. */
-	if (dev->buf_use != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return EBUSY;
-	}
-	/* No more than one allocation per order */
-	if (dev->dma->bufs[order].buf_count != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return ENOMEM;
-	}
+	if (!DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
 
-	ret = drm_do_addbufs_agp(dev, request);
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
 
-	DRM_SPINUNLOCK(&dev->dma_lock);
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
 
-	return ret;
-}
+	byte_count = 0;
+	agp_offset = request->agp_start;
 
-int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
-{
-	int order, ret;
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
 
-	if (!DRM_SUSER(DRM_CURPROC))
-		return EACCES;
-
-	if (request->count < 0 || request->count > 4096)
-		return EINVAL;
-
-	order = drm_order(request->size);
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return EINVAL;
+		return -EINVAL;
 
-	DRM_SPINLOCK(&dev->dma_lock);
+	mtx_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	mtx_unlock(&dev->count_lock);
 
-	/* No more allocations after first buffer-using ioctl. */
-	if (dev->buf_use != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return EBUSY;
+	DRM_LOCK(dev);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
 	}
-	/* No more than one allocation per order */
-	if (dev->dma->bufs[order].buf_count != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return ENOMEM;
+
+	if (count < 0 || count > 4096) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
 	}
 
-	ret = drm_do_addbufs_sg(dev, request);
+	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
+	    M_NOWAIT | M_ZERO);
+	if (!entry->buflist) {
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
 
-	DRM_SPINUNLOCK(&dev->dma_lock);
+	entry->buf_size = size;
+	entry->page_order = page_order;
 
-	return ret;
-}
+	offset = 0;
 
-int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
-{
-	int order, ret;
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
 
-	if (!DRM_SUSER(DRM_CURPROC))
-		return EACCES;
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		buf->file_priv = NULL;
 
-	if (request->count < 0 || request->count > 4096)
-		return EINVAL;
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
+		    M_NOWAIT | M_ZERO);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			DRM_UNLOCK(dev);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
 
-	order = drm_order(request->size);
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return EINVAL;
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 
-	DRM_SPINLOCK(&dev->dma_lock);
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
 
-	/* No more allocations after first buffer-using ioctl. */
-	if (dev->buf_use != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return EBUSY;
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = realloc(dma->buflist,
+	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+	    DRM_MEM_BUFS, M_NOWAIT);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		DRM_UNLOCK(dev);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
 	}
-	/* No more than one allocation per order */
-	if (dev->dma->bufs[order].buf_count != 0) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return ENOMEM;
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 	}
 
-	ret = drm_do_addbufs_pci(dev, request);
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
 
-	DRM_SPINUNLOCK(&dev->dma_lock);
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	return ret;
+	DRM_UNLOCK(dev);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
 }
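
To make the sizing arithmetic above concrete, here is a worked example as a
hedged sketch (the numbers assume a 4 KB page, i.e. PAGE_SHIFT == 12, and are
not part of the patch):

	/*
	 * request->size = 20000  =>  order = drm_order(20000) = 15,
	 *                            size  = 1 << 15 = 32768.
	 * With _DRM_PAGE_ALIGN set, alignment = PAGE_ALIGN(32768) = 32768
	 * (already page aligned); page_order = 15 - 12 = 3, so each buffer
	 * advances byte_count by PAGE_SIZE << 3 = 32768 bytes.
	 */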
 
-int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According to the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call to addbufs_agp(), addbufs_sg(),
+ * addbufs_fb() or addbufs_pci() for AGP, scatter-gather, framebuffer or
+ * consistent PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
 {
 	struct drm_buf_desc *request = data;
-	int err;
+	int ret;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+#if __OS_HAS_AGP
 	if (request->flags & _DRM_AGP_BUFFER)
-		err = drm_addbufs_agp(dev, request);
-	else if (request->flags & _DRM_SG_BUFFER)
-		err = drm_addbufs_sg(dev, request);
+		ret = drm_addbufs_agp(dev, request);
 	else
-		err = drm_addbufs_pci(dev, request);
+#endif
+	if (request->flags & _DRM_SG_BUFFER)
+		ret = drm_addbufs_sg(dev, request);
+	else if (request->flags & _DRM_FB_BUFFER)
+		ret = drm_addbufs_fb(dev, request);
+	else
+		ret = drm_addbufs_pci(dev, request);
 
-	return err;
+	return ret;
 }
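
A minimal userspace sketch of driving this dispatcher, assuming a DRM fd is
already open (the flag choice, sizes and include path are illustrative, not
part of the patch):

	#include <sys/ioctl.h>
	#include <dev/drm2/drm.h>	/* illustrative include path */

	static int request_pci_bufs(int fd)
	{
		struct drm_buf_desc desc = {
			.count = 32,              /* buffers requested */
			.size  = 16384,           /* rounded to 1 << drm_order(size) */
			.flags = _DRM_PAGE_ALIGN, /* no AGP/SG/FB flag: PCI path */
		};

		/* On success the kernel writes back the actual count/size. */
		return ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
	}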
 
-int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing the allocation of more buffers after this call.
+ * Information about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_info *request = data;
 	int i;
 	int count;
-	int retcode = 0;
 
-	DRM_SPINLOCK(&dev->dma_lock);
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	mtx_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
 	++dev->buf_use;		/* Can't allocate more after this call */
-	DRM_SPINUNLOCK(&dev->dma_lock);
+	mtx_unlock(&dev->count_lock);
 
 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 		if (dma->bufs[i].buf_count)
@@ -934,24 +1453,30 @@
 	if (request->count >= count) {
 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 			if (dma->bufs[i].buf_count) {
-				struct drm_buf_desc from;
+				struct drm_buf_desc __user *to =
+				    &request->list[count];
+				struct drm_buf_entry *from = &dma->bufs[i];
+				struct drm_freelist *list = &dma->bufs[i].freelist;
+				if (copy_to_user(&to->count,
+						 &from->buf_count,
+						 sizeof(from->buf_count)) ||
+				    copy_to_user(&to->size,
+						 &from->buf_size,
+						 sizeof(from->buf_size)) ||
+				    copy_to_user(&to->low_mark,
+						 &list->low_mark,
+						 sizeof(list->low_mark)) ||
+				    copy_to_user(&to->high_mark,
+						 &list->high_mark,
+						 sizeof(list->high_mark)))
+					return -EFAULT;
 
-				from.count = dma->bufs[i].buf_count;
-				from.size = dma->bufs[i].buf_size;
-				from.low_mark = dma->bufs[i].freelist.low_mark;
-				from.high_mark = dma->bufs[i].freelist.high_mark;
-
-				if (DRM_COPY_TO_USER(&request->list[count], &from,
-				    sizeof(struct drm_buf_desc)) != 0) {
-					retcode = EFAULT;
-					break;
-				}
-
 				DRM_DEBUG("%d %d %d %d %d\n",
-				    i, dma->bufs[i].buf_count,
-				    dma->bufs[i].buf_size,
-				    dma->bufs[i].freelist.low_mark,
-				    dma->bufs[i].freelist.high_mark);
+					  i,
+					  dma->bufs[i].buf_count,
+					  dma->bufs[i].buf_size,
+					  dma->bufs[i].freelist.low_mark,
+					  dma->bufs[i].freelist.high_mark);
 				++count;
 			}
 		}
@@ -958,174 +1483,227 @@
 	}
 	request->count = count;
 
-	return retcode;
+	return 0;
 }
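
Because the kernel only copies descriptors when request->count is large
enough, callers typically use the two-call pattern sketched below (a hedged
fragment; error handling and allocation checks elided):

	struct drm_buf_info info = { .count = 0, .list = NULL };

	/* First call: learn how many per-order buffer pools exist. */
	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
	/* Second call: fetch count/size and the freelist water marks. */
	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);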
 
-int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Specifies a low and high water mark for buffer allocation.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order falls within the admissible range and updates
+ * the low and high water marks of the respective drm_device_dma::bufs entry.
+ *
+ * \note This ioctl is deprecated and rarely, if ever, used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_desc *request = data;
 	int order;
+	struct drm_buf_entry *entry;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
 	DRM_DEBUG("%d, %d, %d\n",
 		  request->size, request->low_mark, request->high_mark);
-	
+	order = drm_order(request->size);
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	entry = &dma->bufs[order];
 
-	order = drm_order(request->size);	
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
-	    request->low_mark < 0 || request->high_mark < 0) {
-		return EINVAL;
-	}
+	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+		return -EINVAL;
+	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+		return -EINVAL;
 
-	DRM_SPINLOCK(&dev->dma_lock);
-	if (request->low_mark > dma->bufs[order].buf_count ||
-	    request->high_mark > dma->bufs[order].buf_count) {
-		DRM_SPINUNLOCK(&dev->dma_lock);
-		return EINVAL;
-	}
+	entry->freelist.low_mark = request->low_mark;
+	entry->freelist.high_mark = request->high_mark;
 
-	dma->bufs[order].freelist.low_mark  = request->low_mark;
-	dma->bufs[order].freelist.high_mark = request->high_mark;
-	DRM_SPINUNLOCK(&dev->dma_lock);
-
 	return 0;
 }
 
-int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Unreserve the buffers in the list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_free *request = data;
 	int i;
 	int idx;
-	drm_buf_t *buf;
-	int retcode = 0;
+	struct drm_buf *buf;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", request->count);
-	
-	DRM_SPINLOCK(&dev->dma_lock);
 	for (i = 0; i < request->count; i++) {
-		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
-			retcode = EFAULT;
-			break;
-		}
+		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+			return -EFAULT;
 		if (idx < 0 || idx >= dma->buf_count) {
 			DRM_ERROR("Index %d (of %d max)\n",
-			    idx, dma->buf_count - 1);
-			retcode = EINVAL;
-			break;
+				  idx, dma->buf_count - 1);
+			return -EINVAL;
 		}
 		buf = dma->buflist[idx];
 		if (buf->file_priv != file_priv) {
 			DRM_ERROR("Process %d freeing buffer not owned\n",
-			    DRM_CURRENTPID);
-			retcode = EINVAL;
-			break;
+				  DRM_CURRENTPID);
+			return -EINVAL;
 		}
 		drm_free_buffer(dev, buf);
 	}
-	DRM_SPINUNLOCK(&dev->dma_lock);
 
-	return retcode;
+	return 0;
 }
 
-int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG, FB or PCI buffer region with vm_mmap(), and copies
+ * information about each buffer into user space. For PCI buffers, it calls
+ * vm_mmap() with an offset equal to 0, which drm_mmap() interprets as PCI
+ * buffers and calls drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+	        struct drm_file *file_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int retcode = 0;
 	const int zero = 0;
+	vm_offset_t virtual;
 	vm_offset_t address;
 	struct vmspace *vms;
-	vm_ooffset_t foff;
-	vm_size_t size;
-	vm_offset_t vaddr;
 	struct drm_buf_map *request = data;
 	int i;
 
-	vms = DRM_CURPROC->td_proc->p_vmspace;
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
 
-	DRM_SPINLOCK(&dev->dma_lock);
-	dev->buf_use++;		/* Can't allocate more after this call */
-	DRM_SPINUNLOCK(&dev->dma_lock);
+	if (!dma)
+		return -EINVAL;
 
-	if (request->count < dma->buf_count)
-		goto done;
-
-	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-	    (drm_core_check_feature(dev, DRIVER_SG) &&
-	    (dma->flags & _DRM_DMA_USE_SG))) {
-		drm_local_map_t *map = dev->agp_buffer_map;
-
-		if (map == NULL) {
-			retcode = EINVAL;
-			goto done;
-		}
-		size = round_page(map->size);
-		foff = (unsigned long)map->handle;
-	} else {
-		size = round_page(dma->byte_count),
-		foff = 0;
+	mtx_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		mtx_unlock(&dev->count_lock);
+		return -EBUSY;
 	}
+	dev->buf_use++;		/* Can't allocate more after this call */
+	mtx_unlock(&dev->count_lock);
 
-	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
-#if __FreeBSD_version >= 600023
-	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
-	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
-	    dev->devnode, foff);
-#else
-	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
-	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
-	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
-#endif
-	if (retcode)
-		goto done;
+	vms = DRM_CURPROC->td_proc->p_vmspace;
 
-	request->virtual = (void *)vaddr;
+	if (request->count >= dma->buf_count) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
+			struct drm_local_map *map = dev->agp_buffer_map;
+			vm_ooffset_t token = dev->agp_buffer_token;
 
-	for (i = 0; i < dma->buf_count; i++) {
-		if (DRM_COPY_TO_USER(&request->list[i].idx,
-		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
-			retcode = EFAULT;
-			goto done;
+			if (!map) {
+				retcode = -EINVAL;
+				goto done;
+			}
+			retcode = vm_mmap(&vms->vm_map, &virtual, map->size,
+			    PROT_READ | PROT_WRITE, VM_PROT_ALL,
+			    MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
+			    file_priv->minor->device, token);
+		} else {
+			retcode = vm_mmap(&vms->vm_map, &virtual, dma->byte_count,
+			    PROT_READ | PROT_WRITE, VM_PROT_ALL,
+			    MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
+			    file_priv->minor->device, 0);
 		}
-		if (DRM_COPY_TO_USER(&request->list[i].total,
-		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
-			retcode = EFAULT;
+		if (retcode) {
+			/* Real error */
+			retcode = -retcode;
 			goto done;
 		}
-		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
-		    sizeof(zero))) {
-			retcode = EFAULT;
-			goto done;
+		request->virtual = (void __user *)virtual;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			if (copy_to_user(&request->list[i].idx,
+					 &dma->buflist[i]->idx,
+					 sizeof(request->list[0].idx))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].total,
+					 &dma->buflist[i]->total,
+					 sizeof(request->list[0].total))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].used,
+					 &zero, sizeof(zero))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			address = virtual + dma->buflist[i]->offset;	/* *** */
+			if (copy_to_user(&request->list[i].address,
+					 &address, sizeof(address))) {
+				retcode = -EFAULT;
+				goto done;
+			}
 		}
-		address = vaddr + dma->buflist[i]->offset; /* *** */
-		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
-		    sizeof(address))) {
-			retcode = EFAULT;
-			goto done;
-		}
 	}
-
- done:
+      done:
 	request->count = dma->buf_count;
-
 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
 
 	return retcode;
 }
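
A hedged sketch of the matching userspace call: request->count must cover the
kernel's buffer count or the mapping is skipped and only the count is
reported back (struct names are from drm.h; use_buffer() is a hypothetical
consumer):

	struct drm_buf_pub *list = calloc(nbufs, sizeof(*list));
	struct drm_buf_map map = {
		.count = nbufs,	/* must be >= the kernel's dma->buf_count */
		.list = list,
	};

	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) == 0)
		/* list[i].address is now the client-virtual address of buffer i */
		use_buffer(list[0].address, list[0].total);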
 
-/*
- * Compute order.  Can be made faster.
+/**
+ * Compute size order.  Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
  */
 int drm_order(unsigned long size)
 {
 	int order;
+	unsigned long tmp;
 
-	if (size == 0)
-		return 0;
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 
-	order = flsl(size) - 1;
-	if (size & ~(1ul << order))
+	if (size & (size - 1))
 		++order;
 
 	return order;
 }
+EXPORT_SYMBOL(drm_order);
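
Illustrative values for the rewritten drm_order() (not part of the patch):

	drm_order(1)    == 0	/* already a power of two */
	drm_order(4096) == 12	/* 4096 == 1 << 12 */
	drm_order(4097) == 13	/* rounded up to the next power of two */

Note that a size of 0 still yields 0 (the loop never runs and 0 & (0 - 1)
is 0), so the removed size == 0 special case is preserved.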

Modified: trunk/sys/dev/drm2/drm_context.c
===================================================================
--- trunk/sys/dev/drm2/drm_context.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_context.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth at valinux.com
+ *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,29 +32,37 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_context.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_context.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_context.c
- * Implementation of the context management ioctls.
+/*
+ * ChangeLog:
+ *  2001-11-16	Torsten Duwe <duwe at caldera.de>
+ *		added context constructor/destructor hooks,
+ *		needed by SiS driver's memory management.
  */
 
 #include <dev/drm2/drmP.h>
 
-/* ================================================================
- * Context bitmap support
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
+ * lock.
  */
-
-void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-	if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP || 
+	if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
 	    dev->ctx_bitmap == NULL) {
 		DRM_ERROR("Attempt to free invalid context handle: %d\n",
 		   ctx_handle);
@@ -55,10 +73,18 @@
 	clear_bit(ctx_handle, dev->ctx_bitmap);
 	dev->context_sareas[ctx_handle] = NULL;
 	DRM_UNLOCK(dev);
-	return;
 }
 
-int drm_ctxbitmap_next(struct drm_device *dev)
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
 {
 	int bit;
 
@@ -75,7 +101,7 @@
 	set_bit(bit, dev->ctx_bitmap);
 	DRM_DEBUG("bit : %d\n", bit);
 	if ((bit+1) > dev->max_context) {
-		drm_local_map_t **ctx_sareas;
+		struct drm_local_map **ctx_sareas;
 		int max_ctx = (bit+1);
 
 		ctx_sareas = realloc(dev->context_sareas,
@@ -95,7 +121,14 @@
 	return bit;
 }
 
-int drm_ctxbitmap_init(struct drm_device *dev)
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr.
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
 	int i;
    	int temp;
@@ -119,7 +152,15 @@
 	return 0;
 }
 
-void drm_ctxbitmap_cleanup(struct drm_device *dev)
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	DRM_LOCK(dev);
 	if (dev->context_sareas != NULL)
@@ -128,15 +169,29 @@
 	DRM_UNLOCK(dev);
 }
 
-/* ================================================================
- * Per Context SAREA Support
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr with the handle specified and
+ * returns its handle.
  */
-
 int drm_getsareactx(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_ctx_priv_map *request = data;
-	drm_local_map_t *map;
+	struct drm_local_map *map;
 
 	DRM_LOCK(dev);
 	if (dev->max_context < 0 ||
@@ -153,15 +208,29 @@
 	return 0;
 }
 
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::ctx_idr with it.
+ */
 int drm_setsareactx(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_ctx_priv_map *request = data;
-	drm_local_map_t *map = NULL;
+	struct drm_local_map *map = NULL;
+	struct drm_map_list *r_list = NULL;
 
 	DRM_LOCK(dev);
-	TAILQ_FOREACH(map, &dev->maplist, link) {
-		if (map->handle == request->handle) {
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map
+		    && r_list->user_token == (unsigned long) request->handle) {
 			if (dev->max_context < 0)
 				goto bad;
 			if (request->ctx_id >= (unsigned) dev->max_context)
@@ -177,15 +246,27 @@
 	return EINVAL;
 }
 
-/* ================================================================
- * The actual DRM context handling routines
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
  */
-
-int drm_context_switch(struct drm_device *dev, int old, int new)
+static int drm_context_switch(struct drm_device * dev, int old, int new)
 {
 	if (test_and_set_bit(0, &dev->context_flag)) {
 		DRM_ERROR("Reentering -- FIXME\n");
-		return EBUSY;
+		return -EBUSY;
 	}
 
 	DRM_DEBUG("Context switch from %d to %d\n", old, new);
@@ -198,11 +279,24 @@
 	return 0;
 }
 
-int drm_context_switch_complete(struct drm_device *dev, int new)
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+				       struct drm_file *file_priv, int new)
 {
-	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
+	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
+	dev->last_switch = jiffies;
 
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
 		DRM_ERROR("Lock isn't held after context switch\n");
 	}
 
@@ -210,11 +304,22 @@
 	   when the kernel holds the lock, release
 	   that lock here. */
 	clear_bit(0, &dev->context_flag);
+	wakeup(&dev->context_wait);
 
 	return 0;
 }
 
-int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
 {
 	struct drm_ctx_res *res = data;
 	struct drm_ctx ctx;
@@ -221,12 +326,11 @@
 	int i;
 
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
-		bzero(&ctx, sizeof(ctx));
+		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
 			ctx.handle = i;
-			if (DRM_COPY_TO_USER(&res->contexts[i],
-			    &ctx, sizeof(ctx)))
-				return EFAULT;
+			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+				return -EFAULT;
 		}
 	}
 	res->count = DRM_RESERVED_CONTEXTS;
@@ -234,8 +338,21 @@
 	return 0;
 }
 
-int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
 {
+	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
 	ctx->handle = drm_ctxbitmap_next(dev);
@@ -247,15 +364,24 @@
 	if (ctx->handle == -1) {
 		DRM_DEBUG("Not enough free contexts.\n");
 		/* Should this return -EBUSY instead? */
-		return ENOMEM;
+		return -ENOMEM;
 	}
 
-	if (dev->driver->context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
-		DRM_LOCK(dev);
-		dev->driver->context_ctor(dev, ctx->handle);
-		DRM_UNLOCK(dev);
+	ctx_entry = malloc(sizeof(*ctx_entry), DRM_MEM_CTXBITMAP, M_NOWAIT);
+	if (!ctx_entry) {
+		DRM_DEBUG("out of memory\n");
+		return -ENOMEM;
 	}
 
+	INIT_LIST_HEAD(&ctx_entry->head);
+	ctx_entry->handle = ctx->handle;
+	ctx_entry->tag = file_priv;
+
+	DRM_LOCK(dev);
+	list_add(&ctx_entry->head, &dev->ctxlist);
+	++dev->ctx_count;
+	DRM_UNLOCK(dev);
+
 	return 0;
 }
 
@@ -265,6 +391,15 @@
 	return 0;
 }
 
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
 int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
@@ -275,6 +410,17 @@
 	return 0;
 }
 
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
 int drm_switchctx(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
@@ -284,30 +430,66 @@
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
 
-int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
-	drm_context_switch_complete(dev, ctx->handle);
+	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
 	return 0;
 }
 
-int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+	      struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
-		if (dev->driver->context_dtor) {
-			DRM_LOCK(dev);
+		if (dev->driver->context_dtor)
 			dev->driver->context_dtor(dev, ctx->handle);
-			DRM_UNLOCK(dev);
-		}
-
 		drm_ctxbitmap_free(dev, ctx->handle);
 	}
 
+	DRM_LOCK(dev);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->handle == ctx->handle) {
+				list_del(&pos->head);
+				free(pos, DRM_MEM_CTXBITMAP);
+				--dev->ctx_count;
+			}
+		}
+	}
+	DRM_UNLOCK(dev);
+
 	return 0;
 }
+
+/*@}*/
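
For reference, a hedged sketch of the userspace lifecycle these context
ioctls serve (struct drm_ctx and the ioctl numbers come from drm.h; the
surrounding code is illustrative):

	struct drm_ctx ctx = { 0 };

	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) == 0) {
		/* ... submit work tagged with ctx.handle ... */
		ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);	/* frees the handle */
	}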

Modified: trunk/sys/dev/drm2/drm_crtc.c
===================================================================
--- trunk/sys/dev/drm2/drm_crtc.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_crtc.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -31,21 +31,22 @@
  *      Jesse Barnes <jesse.barnes at intel.com>
  */
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_crtc.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_crtc.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-#include <dev/drm2/drm.h>
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm_crtc.h>
 #include <dev/drm2/drm_edid.h>
 #include <dev/drm2/drm_fourcc.h>
-#include <sys/limits.h>
 
+static void drm_property_destroy_blob(struct drm_device *dev,
+    struct drm_property_blob *blob);
+
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
 	char *fnname(int val)					\
 	{							\
 		int i;						\
-		for (i = 0; i < DRM_ARRAY_SIZE(list); i++) {	\
+		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
 			if (list[i].type == val)		\
 				return list[i].name;		\
 		}						\
@@ -161,6 +162,7 @@
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -169,6 +171,7 @@
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
 char *drm_get_encoder_name(struct drm_encoder *encoder)
@@ -180,6 +183,7 @@
 		 encoder->base.id);
 	return buf;
 }
+EXPORT_SYMBOL(drm_get_encoder_name);
 
 char *drm_get_connector_name(struct drm_connector *connector)
 {
@@ -190,6 +194,7 @@
 		 connector->connector_type_id);
 	return buf;
 }
+EXPORT_SYMBOL(drm_get_connector_name);
 
 char *drm_get_connector_status_name(enum drm_connector_status status)
 {
@@ -219,13 +224,12 @@
 static int drm_mode_object_get(struct drm_device *dev,
 			       struct drm_mode_object *obj, uint32_t obj_type)
 {
-	int new_id;
+	int new_id = 0;
 	int ret;
 
-	new_id = 0;
 	ret = drm_gem_name_create(&dev->mode_config.crtc_names, obj, &new_id);
-	if (ret != 0)
-		return (ret);
+	if (ret)
+		return ret;
 
 	obj->id = new_id;
 	obj->type = obj_type;
@@ -252,7 +256,7 @@
 struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 		uint32_t id, uint32_t type)
 {
-	struct drm_mode_object *obj;
+	struct drm_mode_object *obj = NULL;
 
 	obj = drm_gem_name_ref(&dev->mode_config.crtc_names, id, NULL);
 	if (!obj || (obj->type != type) || (obj->id != id))
@@ -260,6 +264,7 @@
 
 	return obj;
 }
+EXPORT_SYMBOL(drm_mode_object_find);
 
 /**
  * drm_framebuffer_init - initialize a framebuffer
@@ -279,7 +284,7 @@
 {
 	int ret;
 
-	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
+	refcount_init(&fb->refcount, 1);
 
 	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
 	if (ret)
@@ -292,8 +297,41 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_framebuffer_init);
 
+static void drm_framebuffer_free(struct drm_framebuffer *fb)
+{
+	fb->funcs->destroy(fb);
+}
+
 /**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	if (!sx_xlocked(&dev->mode_config.mutex))
+		DRM_WARNING("%s: dev->mode_config.mutex not locked\n", __func__);
+	if (refcount_release(&fb->refcount))
+		drm_framebuffer_free(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	refcount_acquire(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
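+
+/*
+ * The intended discipline, as a short sketch (assumed usage, not from the
+ * patch): drm_framebuffer_init() starts the count at 1 for the creating
+ * file, and any other holder brackets its use with a reference pair:
+ *
+ *	drm_framebuffer_reference(fb);   // pin fb, e.g. while it scans out
+ *	// ... use fb ...
+ *	drm_framebuffer_unreference(fb); // last unref calls fb->funcs->destroy()
+ */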
+
+/**
  * drm_framebuffer_cleanup - remove a framebuffer object
  * @fb: framebuffer to remove
  *
@@ -306,13 +344,37 @@
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	/*
+	 * This could be moved to drm_framebuffer_remove(), but for
+	 * debugging is nice to keep around the list of fb's that are
+	 * no longer associated w/ a drm_file but are not unreferenced
+	 * yet.  (i915 and omapdrm have debugfs files which will show
+	 * this.)
+	 */
+	drm_mode_object_put(dev, &fb->base);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config.  If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	struct drm_mode_set set;
 	int ret;
 
-	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
-
 	/* remove from any CRTC */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (crtc->fb == fb) {
@@ -338,10 +400,11 @@
 		}
 	}
 
-	drm_mode_object_put(dev, &fb->base);
-	list_del(&fb->head);
-	dev->mode_config.num_fb--;
+	list_del(&fb->filp_head);
+
+	drm_framebuffer_unreference(fb);
 }
+EXPORT_SYMBOL(drm_framebuffer_remove);
 
 /**
  * drm_crtc_init - Initialise a new CRTC object
@@ -350,7 +413,7 @@
  * @funcs: callbacks for the new CRTC
  *
  * LOCKING:
- * Caller must hold mode config lock.
+ * Takes mode_config lock.
  *
 * Inits a new object created as the base part of a driver crtc object.
  *
@@ -364,19 +427,25 @@
 
 	crtc->dev = dev;
 	crtc->funcs = funcs;
+	crtc->invert_dimensions = false;
 
 	sx_xlock(&dev->mode_config.mutex);
+
 	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
 	if (ret)
 		goto out;
 
+	crtc->base.properties = &crtc->properties;
+
 	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
 	dev->mode_config.num_crtc++;
-out:
+
+ out:
 	sx_xunlock(&dev->mode_config.mutex);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_crtc_init);
 
 /**
  * drm_crtc_cleanup - Cleans up the core crtc usage.
@@ -392,17 +461,14 @@
 {
 	struct drm_device *dev = crtc->dev;
 
-	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
+	free(crtc->gamma_store, DRM_MEM_KMS);
+	crtc->gamma_store = NULL;
 
-	if (crtc->gamma_store) {
-		free(crtc->gamma_store, DRM_MEM_KMS);
-		crtc->gamma_store = NULL;
-	}
-
 	drm_mode_object_put(dev, &crtc->base);
 	list_del(&crtc->head);
 	dev->mode_config.num_crtc--;
 }
+EXPORT_SYMBOL(drm_crtc_cleanup);
 
 /**
  * drm_mode_probed_add - add a mode to a connector's probed mode list
@@ -417,11 +483,9 @@
 void drm_mode_probed_add(struct drm_connector *connector,
 			 struct drm_display_mode *mode)
 {
-
-	DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
-
 	list_add(&mode->head, &connector->probed_modes);
 }
+EXPORT_SYMBOL(drm_mode_probed_add);
 
 /**
  * drm_mode_remove - remove and free a mode
@@ -436,12 +500,10 @@
 void drm_mode_remove(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
 {
-
-	DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
-
 	list_del(&mode->head);
 	drm_mode_destroy(connector->dev, mode);
 }
+EXPORT_SYMBOL(drm_mode_remove);
 
 /**
  * drm_connector_init - Init a preallocated connector
@@ -472,6 +534,7 @@
 	if (ret)
 		goto out;
 
+	connector->base.properties = &connector->properties;
 	connector->dev = dev;
 	connector->funcs = funcs;
 	connector->connector_type = connector_type;
@@ -481,21 +544,25 @@
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
 	connector->edid_blob_ptr = NULL;
+	connector->status = connector_status_unknown;
 
 	list_add_tail(&connector->head, &dev->mode_config.connector_list);
 	dev->mode_config.num_connector++;
 
-	drm_connector_attach_property(connector,
-				      dev->mode_config.edid_property, 0);
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_object_attach_property(&connector->base,
+					      dev->mode_config.edid_property,
+					      0);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dpms_property, 0);
 
-out:
+ out:
 	sx_xunlock(&dev->mode_config.mutex);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_connector_init);
 
 /**
  * drm_connector_cleanup - cleans up an initialised connector
@@ -526,11 +593,25 @@
 	dev->mode_config.num_connector--;
 	sx_xunlock(&dev->mode_config.mutex);
 }
+EXPORT_SYMBOL(drm_connector_cleanup);
 
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+#ifdef FREEBSD_NOTYET
+	struct drm_connector *connector;
+
+	/* taking the mode config mutex ends up in a clash with sysfs */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_sysfs_connector_remove(connector);
+#endif /* FREEBSD_NOTYET */
+
+}
+EXPORT_SYMBOL(drm_connector_unplug_all);
+
 int drm_encoder_init(struct drm_device *dev,
-		     struct drm_encoder *encoder,
-		     const struct drm_encoder_funcs *funcs,
-		     int encoder_type)
+		      struct drm_encoder *encoder,
+		      const struct drm_encoder_funcs *funcs,
+		      int encoder_type)
 {
 	int ret;
 
@@ -547,16 +628,16 @@
 	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
 	dev->mode_config.num_encoder++;
 
-out:
+ out:
 	sx_xunlock(&dev->mode_config.mutex);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_encoder_init);
 
 void drm_encoder_cleanup(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-
 	sx_xlock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &encoder->base);
 	list_del(&encoder->head);
@@ -563,6 +644,7 @@
 	dev->mode_config.num_encoder--;
 	sx_xunlock(&dev->mode_config.mutex);
 }
+EXPORT_SYMBOL(drm_encoder_cleanup);
 
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
@@ -578,10 +660,17 @@
 	if (ret)
 		goto out;
 
+	plane->base.properties = &plane->properties;
 	plane->dev = dev;
 	plane->funcs = funcs;
 	plane->format_types = malloc(sizeof(uint32_t) * format_count,
 	    DRM_MEM_KMS, M_WAITOK);
+	if (!plane->format_types) {
+		DRM_DEBUG_KMS("out of memory when allocating plane\n");
+		drm_mode_object_put(dev, &plane->base);
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
 	plane->format_count = format_count;
@@ -598,11 +687,12 @@
 		INIT_LIST_HEAD(&plane->head);
 	}
 
-out:
+ out:
 	sx_xunlock(&dev->mode_config.mutex);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_plane_init);
 
 void drm_plane_cleanup(struct drm_plane *plane)
 {
@@ -618,6 +708,7 @@
 	}
 	sx_xunlock(&dev->mode_config.mutex);
 }
+EXPORT_SYMBOL(drm_plane_cleanup);
 
 /**
  * drm_mode_create - create a new display mode
@@ -637,13 +728,17 @@
 
 	nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS,
 	    M_WAITOK | M_ZERO);
+	if (!nmode)
+		return NULL;
 
 	if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
 		free(nmode, DRM_MEM_KMS);
-		return (NULL);
+		return NULL;
 	}
+
 	return nmode;
 }
+EXPORT_SYMBOL(drm_mode_create);
 
 /**
  * drm_mode_destroy - remove a mode
@@ -664,6 +759,7 @@
 
 	free(mode, DRM_MEM_KMS);
 }
+EXPORT_SYMBOL(drm_mode_destroy);
 
 static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 {
@@ -680,7 +776,7 @@
 
 	dpms = drm_property_create_enum(dev, 0,
 				   "DPMS", drm_dpms_enum_list,
-				    DRM_ARRAY_SIZE(drm_dpms_enum_list));
+				   ARRAY_SIZE(drm_dpms_enum_list));
 	dev->mode_config.dpms_property = dpms;
 
 	return 0;
@@ -704,17 +800,18 @@
 		drm_property_create_enum(dev, 0,
 				    "select subconnector",
 				    drm_dvi_i_select_enum_list,
-				    DRM_ARRAY_SIZE(drm_dvi_i_select_enum_list));
+				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
 	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
 
 	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 				    "subconnector",
 				    drm_dvi_i_subconnector_enum_list,
-				    DRM_ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
 	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
 
 /**
  * drm_create_tv_properties - create TV specific connector properties
@@ -743,7 +840,7 @@
 	tv_selector = drm_property_create_enum(dev, 0,
 					  "select subconnector",
 					  drm_tv_select_enum_list,
-					  DRM_ARRAY_SIZE(drm_tv_select_enum_list));
+					  ARRAY_SIZE(drm_tv_select_enum_list));
 	dev->mode_config.tv_select_subconnector_property = tv_selector;
 
 	tv_subconnector =
@@ -750,7 +847,7 @@
 		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 				    "subconnector",
 				    drm_tv_subconnector_enum_list,
- 				    DRM_ARRAY_SIZE(drm_tv_subconnector_enum_list));
+				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
 	dev->mode_config.tv_subconnector_property = tv_subconnector;
 
 	/*
@@ -795,6 +892,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
 
 /**
  * drm_mode_create_scaling_mode_property - create scaling mode property
@@ -813,12 +911,13 @@
 	scaling_mode =
 		drm_property_create_enum(dev, 0, "scaling mode",
 				drm_scaling_mode_enum_list,
-				    DRM_ARRAY_SIZE(drm_scaling_mode_enum_list));
+				    ARRAY_SIZE(drm_scaling_mode_enum_list));
 
 	dev->mode_config.scaling_mode_property = scaling_mode;
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
 /**
  * drm_mode_create_dithering_property - create dithering property
@@ -837,11 +936,12 @@
 	dithering_mode =
 		drm_property_create_enum(dev, 0, "dithering",
 				drm_dithering_mode_enum_list,
-				    DRM_ARRAY_SIZE(drm_dithering_mode_enum_list));
+				    ARRAY_SIZE(drm_dithering_mode_enum_list));
 	dev->mode_config.dithering_mode_property = dithering_mode;
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
 /**
  * drm_mode_create_dirty_property - create dirty property
@@ -861,11 +961,12 @@
 		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 				    "dirty",
 				    drm_dirty_info_enum_list,
-				    DRM_ARRAY_SIZE(drm_dirty_info_enum_list));
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
 	dev->mode_config.dirty_info_property = dirty_info;
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
@@ -899,9 +1000,9 @@
 	dev->mode_config.num_crtc = 0;
 	dev->mode_config.num_encoder = 0;
 }
+EXPORT_SYMBOL(drm_mode_config_init);
 
-static int
-drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
 	uint32_t total_objects = 0;
 
@@ -911,6 +1012,8 @@
 
 	group->id_list = malloc(total_objects * sizeof(uint32_t),
 	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	if (!group->id_list)
+		return -ENOMEM;
 
 	group->num_crtcs = 0;
 	group->num_connectors = 0;
@@ -918,6 +1021,12 @@
 	return 0;
 }
 
+void drm_mode_group_free(struct drm_mode_group *group)
+{
+	free(group->id_list, DRM_MEM_KMS);
+	group->id_list = NULL;
+}
+
 int drm_mode_group_init_legacy_group(struct drm_device *dev,
 				     struct drm_mode_group *group)
 {
@@ -942,6 +1051,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
 /**
  * drm_mode_config_cleanup - free up DRM mode_config info
@@ -962,6 +1072,7 @@
 	struct drm_encoder *encoder, *enct;
 	struct drm_framebuffer *fb, *fbt;
 	struct drm_property *property, *pt;
+	struct drm_property_blob *blob, *bt;
 	struct drm_plane *plane, *plt;
 
 	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
@@ -979,12 +1090,13 @@
 		drm_property_destroy(dev, property);
 	}
 
-	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		fb->funcs->destroy(fb);
+	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+				 head) {
+		drm_property_destroy_blob(dev, blob);
 	}
 
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
+	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+		drm_framebuffer_remove(fb);
 	}
 
 	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -991,8 +1103,14 @@
 				 head) {
 		plane->funcs->destroy(plane);
 	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
 	drm_gem_names_fini(&dev->mode_config.crtc_names);
 }
+EXPORT_SYMBOL(drm_mode_config_cleanup);
 
 /**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
@@ -1009,11 +1127,11 @@
 				      const struct drm_display_mode *in)
 {
 	if (in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
-	    in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
-	    in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
-	    in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
-	    in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX)
-		printf("timing values too large for mode info\n");
+	     in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+	     in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+	     in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+	     in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX)
+		DRM_WARNING("timing values too large for mode info\n");
 
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
@@ -1051,7 +1169,7 @@
 				  const struct drm_mode_modeinfo *in)
 {
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
-		return ERANGE;
+		return -ERANGE;
 
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
@@ -1113,7 +1231,7 @@
 	struct drm_mode_group *mode_group;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 
@@ -1124,13 +1242,8 @@
 	list_for_each(lh, &file_priv->fbs)
 		fb_count++;
 
-#if 1
-	mode_group = NULL; /* XXXKIB */
-	if (1 || file_priv->master) {
-#else
-	mode_group = &file_priv->masterp->minor->mode_group;
-	if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
-#endif
+	mode_group = &file_priv->master->minor->mode_group;
+	if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 
 		list_for_each(lh, &dev->mode_config.crtc_list)
 			crtc_count++;
@@ -1156,11 +1269,10 @@
 	/* FBs */
 	if (card_res->count_fbs >= fb_count) {
 		copied = 0;
-		fb_id = (uint32_t *)(uintptr_t)card_res->fb_id_ptr;
+		fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
 		list_for_each_entry(fb, &file_priv->fbs, filp_head) {
-			if (copyout(&fb->base.id, fb_id + copied,
-			    sizeof(uint32_t))) {
-				ret = EFAULT;
+			if (put_user(fb->base.id, fb_id + copied)) {
+				ret = -EFAULT;
 				goto out;
 			}
 			copied++;
@@ -1171,18 +1283,13 @@
 	/* CRTCs */
 	if (card_res->count_crtcs >= crtc_count) {
 		copied = 0;
-		crtc_id = (uint32_t *)(uintptr_t)card_res->crtc_id_ptr;
-#if 1
-		if (1 || file_priv->master) {
-#else
-		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
-#endif
+		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 					    head) {
 				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-				if (copyout(&crtc->base.id, crtc_id +
-				    copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(crtc->base.id, crtc_id + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1189,9 +1296,9 @@
 			}
 		} else {
 			for (i = 0; i < mode_group->num_crtcs; i++) {
-				if (copyout(&mode_group->id_list[i],
-				    crtc_id + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(mode_group->id_list[i],
+					     crtc_id + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1203,31 +1310,25 @@
 	/* Encoders */
 	if (card_res->count_encoders >= encoder_count) {
 		copied = 0;
-		encoder_id = (uint32_t *)(uintptr_t)card_res->encoder_id_ptr;
-#if 1
-		if (file_priv->master) {
-#else
-		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
-#endif
+		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 			list_for_each_entry(encoder,
 					    &dev->mode_config.encoder_list,
 					    head) {
 				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
 						drm_get_encoder_name(encoder));
-				if (copyout(&encoder->base.id, encoder_id +
-				    copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(encoder->base.id, encoder_id +
+					     copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
 			}
 		} else {
-			for (i = mode_group->num_crtcs;
-			    i < mode_group->num_crtcs + mode_group->num_encoders;
-			     i++) {
-				if (copyout(&mode_group->id_list[i],
-				    encoder_id + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+			for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+				if (put_user(mode_group->id_list[i],
+					     encoder_id + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1240,12 +1341,8 @@
 	/* Connectors */
 	if (card_res->count_connectors >= connector_count) {
 		copied = 0;
-		connector_id = (uint32_t *)(uintptr_t)card_res->connector_id_ptr;
-#if 1
-		if (file_priv->master) {
-#else
-		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
-#endif
+		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 			list_for_each_entry(connector,
 					    &dev->mode_config.connector_list,
 					    head) {
@@ -1252,9 +1349,9 @@
 				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 					connector->base.id,
 					drm_get_connector_name(connector));
-				if (copyout(&connector->base.id,
-				    connector_id + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(connector->base.id,
+					     connector_id + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1263,9 +1360,9 @@
 			int start = mode_group->num_crtcs +
 				mode_group->num_encoders;
 			for (i = start; i < start + mode_group->num_connectors; i++) {
-				if (copyout(&mode_group->id_list[i],
-				    connector_id + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(mode_group->id_list[i],
+					     connector_id + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1308,7 +1405,7 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 
@@ -1315,7 +1412,7 @@
 	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = (EINVAL);
+		ret = -EINVAL;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1374,12 +1471,12 @@
 	int i;
 	struct drm_mode_modeinfo u_mode;
 	struct drm_mode_modeinfo __user *mode_ptr;
-	uint32_t *prop_ptr;
-	uint64_t *prop_values;
-	uint32_t *encoder_ptr;
+	uint32_t __user *prop_ptr;
+	uint64_t __user *prop_values;
+	uint32_t __user *encoder_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
 
@@ -1390,16 +1487,12 @@
 	obj = drm_mode_object_find(dev, out_resp->connector_id,
 				   DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	connector = obj_to_connector(obj);
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] != 0) {
-			props_count++;
-		}
-	}
+	props_count = connector->properties.count;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] != 0) {
@@ -1435,12 +1528,12 @@
 	 */
 	if ((out_resp->count_modes >= mode_count) && mode_count) {
 		copied = 0;
-		mode_ptr = (struct drm_mode_modeinfo *)(uintptr_t)out_resp->modes_ptr;
+		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
 			drm_crtc_convert_to_umode(&u_mode, mode);
-			if (copyout(&u_mode, mode_ptr + copied,
-			    sizeof(u_mode))) {
-				ret = EFAULT;
+			if (copy_to_user(mode_ptr + copied,
+					 &u_mode, sizeof(u_mode))) {
+				ret = -EFAULT;
 				goto out;
 			}
 			copied++;
@@ -1450,23 +1543,21 @@
 
 	if ((out_resp->count_props >= props_count) && props_count) {
 		copied = 0;
-		prop_ptr = (uint32_t *)(uintptr_t)(out_resp->props_ptr);
-		prop_values = (uint64_t *)(uintptr_t)(out_resp->prop_values_ptr);
-		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-			if (connector->property_ids[i] != 0) {
-				if (copyout(&connector->property_ids[i],
-				    prop_ptr + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
-					goto out;
-				}
+		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+		for (i = 0; i < connector->properties.count; i++) {
+			if (put_user(connector->properties.ids[i],
+				     prop_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
 
-				if (copyout(&connector->property_values[i],
-				    prop_values + copied, sizeof(uint64_t))) {
-					ret = EFAULT;
-					goto out;
-				}
-				copied++;
+			if (put_user(connector->properties.values[i],
+				     prop_values + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
+			copied++;
 		}
 	}
 	out_resp->count_props = props_count;
@@ -1473,12 +1564,12 @@
 
 	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
 		copied = 0;
-		encoder_ptr = (uint32_t *)(uintptr_t)(out_resp->encoders_ptr);
+		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
 		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 			if (connector->encoder_ids[i] != 0) {
-				if (copyout(&connector->encoder_ids[i],
-				    encoder_ptr + copied, sizeof(uint32_t))) {
-					ret = EFAULT;
+				if (put_user(connector->encoder_ids[i],
+					     encoder_ptr + copied)) {
+					ret = -EFAULT;
 					goto out;
 				}
 				copied++;
@@ -1501,13 +1592,13 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
 				   DRM_MODE_OBJECT_ENCODER);
 	if (!obj) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	encoder = obj_to_encoder(obj);
@@ -1543,11 +1634,11 @@
 	struct drm_mode_get_plane_res *plane_resp = data;
 	struct drm_mode_config *config;
 	struct drm_plane *plane;
-	uint32_t *plane_ptr;
+	uint32_t __user *plane_ptr;
 	int copied = 0, ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	config = &dev->mode_config;
@@ -1558,12 +1649,11 @@
 	 */
 	if (config->num_plane &&
 	    (plane_resp->count_planes >= config->num_plane)) {
-		plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr;
+		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
 		list_for_each_entry(plane, &config->plane_list, head) {
-			if (copyout(&plane->base.id, plane_ptr + copied,
-			    sizeof(uint32_t))) {
-				ret = EFAULT;
+			if (put_user(plane->base.id, plane_ptr + copied)) {
+				ret = -EFAULT;
 				goto out;
 			}
 			copied++;
@@ -1594,17 +1684,17 @@
 	struct drm_mode_get_plane *plane_resp = data;
 	struct drm_mode_object *obj;
 	struct drm_plane *plane;
-	uint32_t *format_ptr;
+	uint32_t __user *format_ptr;
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, plane_resp->plane_id,
 				   DRM_MODE_OBJECT_PLANE);
 	if (!obj) {
-		ret = ENOENT;
+		ret = -ENOENT;
 		goto out;
 	}
 	plane = obj_to_plane(obj);
@@ -1629,11 +1719,11 @@
 	 */
 	if (plane->format_count &&
 	    (plane_resp->count_format_types >= plane->format_count)) {
-		format_ptr = (uint32_t *)(unsigned long)plane_resp->format_type_ptr;
-		if (copyout(format_ptr,
+		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+		if (copy_to_user(format_ptr,
 				 plane->format_types,
 				 sizeof(uint32_t) * plane->format_count)) {
-			ret = EFAULT;
+			ret = -EFAULT;
 			goto out;
 		}
 	}
@@ -1669,7 +1759,7 @@
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 
@@ -1682,7 +1772,7 @@
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown plane ID %d\n",
 			      plane_req->plane_id);
-		ret = ENOENT;
+		ret = -ENOENT;
 		goto out;
 	}
 	plane = obj_to_plane(obj);
@@ -1700,7 +1790,7 @@
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
 			      plane_req->crtc_id);
-		ret = ENOENT;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1710,7 +1800,7 @@
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
 			      plane_req->fb_id);
-		ret = ENOENT;
+		ret = -ENOENT;
 		goto out;
 	}
 	fb = obj_to_fb(obj);
@@ -1721,7 +1811,7 @@
 			break;
 	if (i == plane->format_count) {
 		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1743,7 +1833,7 @@
 			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
 			      plane_req->src_y >> 16,
 			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
-		ret = ENOSPC;
+		ret = -ENOSPC;
 		goto out;
 	}
 
@@ -1755,11 +1845,11 @@
 		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
 			      plane_req->crtc_w, plane_req->crtc_h,
 			      plane_req->crtc_x, plane_req->crtc_y);
-		ret = ERANGE;
+		ret = -ERANGE;
 		goto out;
 	}
 
-	ret = -plane->funcs->update_plane(plane, crtc, fb,
+	ret = plane->funcs->update_plane(plane, crtc, fb,
 					 plane_req->crtc_x, plane_req->crtc_y,
 					 plane_req->crtc_w, plane_req->crtc_h,
 					 plane_req->src_x, plane_req->src_y,
@@ -1803,16 +1893,16 @@
 	struct drm_framebuffer *fb = NULL;
 	struct drm_display_mode *mode = NULL;
 	struct drm_mode_set set;
-	uint32_t *set_connectors_ptr;
-	int ret = 0;
+	uint32_t __user *set_connectors_ptr;
+	int ret;
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	/* For some reason crtc x/y offsets are signed internally. */
 	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
-		return (ERANGE);
+		return -ERANGE;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
@@ -1819,7 +1909,7 @@
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1826,6 +1916,7 @@
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
+		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -1841,7 +1932,7 @@
 			if (!obj) {
 				DRM_DEBUG_KMS("Unknown FB ID%d\n",
 						crtc_req->fb_id);
-				ret = EINVAL;
+				ret = -EINVAL;
 				goto out;
 			}
 			fb = obj_to_fb(obj);
@@ -1849,7 +1940,7 @@
 
 		mode = drm_mode_create(dev);
 		if (!mode) {
-			ret = ENOMEM;
+			ret = -ENOMEM;
 			goto out;
 		}
 
@@ -1861,15 +1952,25 @@
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		if (mode->hdisplay > fb->width ||
-		    mode->vdisplay > fb->height ||
-		    crtc_req->x > fb->width - mode->hdisplay ||
-		    crtc_req->y > fb->height - mode->vdisplay) {
-			DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
-				      mode->hdisplay, mode->vdisplay,
-				      crtc_req->x, crtc_req->y,
-				      fb->width, fb->height);
-			ret = ENOSPC;
+		hdisplay = mode->hdisplay;
+		vdisplay = mode->vdisplay;
+
+		if (crtc->invert_dimensions) {
+			int tmp;
+			tmp = vdisplay;
+			vdisplay = hdisplay;
+			hdisplay = tmp;
+		}
+
+		if (hdisplay > fb->width ||
+		    vdisplay > fb->height ||
+		    crtc_req->x > fb->width - hdisplay ||
+		    crtc_req->y > fb->height - vdisplay) {
+			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+				      fb->width, fb->height,
+				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+				      crtc->invert_dimensions ? " (inverted)" : "");
+			ret = -ENOSPC;
 			goto out;
 		}
 	}
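
The hdisplay/vdisplay swap just above supports drivers that scan out rotated
by 90 or 270 degrees: the mode keeps its normal orientation while the
framebuffer is validated with its axes exchanged.  A self-contained
illustration of the swapped viewport check (dimensions illustrative):

	#include <assert.h>
	#include <stdbool.h>

	static bool viewport_fits(int hdisplay, int vdisplay, bool inverted,
				  int fb_w, int fb_h, int x, int y)
	{
		if (inverted) {			/* same swap as the ioctl */
			int tmp = vdisplay;
			vdisplay = hdisplay;
			hdisplay = tmp;
		}
		return hdisplay <= fb_w && vdisplay <= fb_h &&
		       x <= fb_w - hdisplay && y <= fb_h - vdisplay;
	}

	int main(void)
	{
		/* 1920x1080 mode on a 90-degree-rotated CRTC: the fb must
		 * be 1080 wide and 1920 tall, not the other way around. */
		assert( viewport_fits(1920, 1080, true, 1080, 1920, 0, 0));
		assert(!viewport_fits(1920, 1080, true, 1920, 1080, 0, 0));
		return 0;
	}
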
@@ -1876,7 +1977,7 @@
 
 	if (crtc_req->count_connectors == 0 && mode) {
 		DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1883,7 +1984,7 @@
 	if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
 		DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
 			  crtc_req->count_connectors);
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1892,17 +1993,22 @@
 
 		/* Avoid unbounded kernel memory allocation */
 		if (crtc_req->count_connectors > config->num_connector) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			goto out;
 		}
 
 		connector_set = malloc(crtc_req->count_connectors *
-		    sizeof(struct drm_connector *), DRM_MEM_KMS, M_WAITOK);
+					sizeof(struct drm_connector *),
+					DRM_MEM_KMS, M_WAITOK);
+		if (!connector_set) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
 		for (i = 0; i < crtc_req->count_connectors; i++) {
-			set_connectors_ptr = (uint32_t *)(uintptr_t)crtc_req->set_connectors_ptr;
-			if (copyin(&set_connectors_ptr[i], &out_id, sizeof(uint32_t))) {
-				ret = EFAULT;
+			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+			if (get_user(out_id, &set_connectors_ptr[i])) {
+				ret = -EFAULT;
 				goto out;
 			}
 
@@ -1911,7 +2017,7 @@
 			if (!obj) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
-				ret = EINVAL;
+				ret = -EINVAL;
 				goto out;
 			}
 			connector = obj_to_connector(obj);
@@ -1948,16 +2054,16 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
-	if (!req->flags)
-		return (EINVAL);
+	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1964,11 +2070,11 @@
 
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (!crtc->funcs->cursor_set) {
-			ret = ENXIO;
+			ret = -ENXIO;
 			goto out;
 		}
 		/* Turns off the cursor if handle is 0 */
-		ret = -crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+		ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
 					      req->width, req->height);
 	}
 
@@ -1976,7 +2082,7 @@
 		if (crtc->funcs->cursor_move) {
 			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
 		} else {
-			ret = EFAULT;
+			ret = -EFAULT;
 			goto out;
 		}
 	}
@@ -1992,7 +2098,7 @@
 
 	switch (bpp) {
 	case 8:
-		fmt = DRM_FORMAT_RGB332;
+		fmt = DRM_FORMAT_C8;
 		break;
 	case 16:
 		if (depth == 15)
@@ -2012,7 +2118,7 @@
 			fmt = DRM_FORMAT_ARGB8888;
 		break;
 	default:
-		DRM_ERROR("bad bpp, assuming RGB24 pixel format\n");
+		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
 		fmt = DRM_FORMAT_XRGB8888;
 		break;
 	}
@@ -2019,6 +2125,7 @@
 
 	return fmt;
 }
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
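
A few of the mappings the function performs, for quick reference (argument
order is (bpp, depth); the expected results follow the upstream mapping this
code tracks, with constants from drm_fourcc.h):

	uint32_t fmt;

	fmt = drm_mode_legacy_fb_format( 8,  8);  /* DRM_FORMAT_C8, new here */
	fmt = drm_mode_legacy_fb_format(16, 15);  /* DRM_FORMAT_XRGB1555 */
	fmt = drm_mode_legacy_fb_format(16, 16);  /* DRM_FORMAT_RGB565 */
	fmt = drm_mode_legacy_fb_format(32, 24);  /* DRM_FORMAT_XRGB8888 */
	fmt = drm_mode_legacy_fb_format(32, 32);  /* DRM_FORMAT_ARGB8888 */
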
 
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
@@ -2055,18 +2162,22 @@
 	r.handles[0] = or->handle;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	if ((config->min_width > r.width) || (r.width > config->max_width))
-		return (EINVAL);
+		return -EINVAL;
+
 	if ((config->min_height > r.height) || (r.height > config->max_height))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 
-	ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb);
+	/* TODO check buffer is sufficiently large */
+	/* TODO setup destructor callback */
+
+	ret = dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb);
 	if (ret != 0) {
-		DRM_ERROR("could not create framebuffer, error %d\n", ret);
+		DRM_DEBUG_KMS("could not create framebuffer\n");
 		goto out;
 	}
 
@@ -2079,7 +2190,7 @@
 	return ret;
 }
 
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
 {
 	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
@@ -2132,6 +2243,8 @@
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
 	case DRM_FORMAT_YUV411:
@@ -2144,10 +2257,59 @@
 	case DRM_FORMAT_YVU444:
 		return 0;
 	default:
-		return (EINVAL);
+		return -EINVAL;
 	}
 }
 
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+	int ret, hsub, vsub, num_planes, i;
+
+	ret = format_check(r);
+	if (ret) {
+		DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+		return ret;
+	}
+
+	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+	num_planes = drm_format_num_planes(r->pixel_format);
+
+	if (r->width == 0 || r->width % hsub) {
+		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+		return -EINVAL;
+	}
+
+	if (r->height == 0 || r->height % vsub) {
+		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? hsub : 1);
+		unsigned int height = r->height / (i != 0 ? vsub : 1);
+		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+		if (!r->handles[i]) {
+			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if ((uint64_t) width * cpp > UINT_MAX)
+			return -ERANGE;
+
+		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+			return -ERANGE;
+
+		if (r->pitches[i] < width * cpp) {
+			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
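
A worked example of the checks above, for a 640x480 DRM_FORMAT_NV12 buffer
(hsub = vsub = 2, two planes; numbers illustrative):

	/* plane 0 (Y):      width 640, height 480, cpp 1 -> pitches[0] >= 640
	 * plane 1 (Cb/Cr):  width 320, height 240, cpp 2 -> pitches[1] >= 640
	 *
	 * Both handles[] entries must be non-zero, every height*pitch+offset
	 * product must fit in 32 bits, and an odd width such as 641 already
	 * fails the "r->width % hsub" test before the per-plane loop runs.
	 */
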
 /**
  * drm_mode_addfb2 - add an FB to the graphics configuration
  * @inode: inode from the ioctl
@@ -2171,36 +2333,36 @@
 	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
+	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return -EINVAL;
+	}
+
 	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
 			  r->width, config->min_width, config->max_width);
-		return (EINVAL);
+		return -EINVAL;
 	}
 	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
 			  r->height, config->min_height, config->max_height);
-		return (EINVAL);
+		return -EINVAL;
 	}
 
-	ret = format_check(r);
-	if (ret) {
-		DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+	ret = framebuffer_check(r);
+	if (ret)
 		return ret;
-	}
 
 	sx_xlock(&dev->mode_config.mutex);
 
-	/* TODO check buffer is sufficiently large */
-	/* TODO setup destructor callback */
-
-	ret = -dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb);
+	ret = dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb);
 	if (ret != 0) {
-		DRM_ERROR("could not create framebuffer, error %d\n", ret);
+		DRM_DEBUG_KMS("could not create framebuffer\n");
 		goto out;
 	}
 
@@ -2210,7 +2372,7 @@
 
 out:
 	sx_xunlock(&dev->mode_config.mutex);
-	return (ret);
+	return ret;
 }
 
 /**
@@ -2241,13 +2403,13 @@
 	int found = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
 	/* TODO check that we really get a framebuffer back. */
 	if (!obj) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	fb = obj_to_fb(obj);
@@ -2257,16 +2419,12 @@
 			found = 1;
 
 	if (!found) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 
-	/* TODO release all crtc connected to the framebuffer */
-	/* TODO unhock the destructor from the buffer object */
+	drm_framebuffer_remove(fb);
 
-	list_del(&fb->filp_head);
-	fb->funcs->destroy(fb);
-
 out:
 	sx_xunlock(&dev->mode_config.mutex);
 	return ret;
@@ -2298,12 +2456,12 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
 	if (!obj) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out;
 	}
 	fb = obj_to_fb(obj);
@@ -2313,6 +2471,7 @@
 	r->depth = fb->depth;
 	r->bpp = fb->bits_per_pixel;
 	r->pitch = fb->pitches[0];
+	r->handle = 0;
 	fb->funcs->create_handle(fb, file_priv, &r->handle);
 
 out:
@@ -2330,24 +2489,24 @@
 	struct drm_framebuffer *fb;
 	unsigned flags;
 	int num_clips;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
 	if (!obj) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out_err1;
 	}
 	fb = obj_to_fb(obj);
 
 	num_clips = r->num_clips;
-	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
 	if (!num_clips != !clips_ptr) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out_err1;
 	}
 
@@ -2355,28 +2514,35 @@
 
 	/* If userspace annotates copy, clips must come in pairs */
 	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto out_err1;
 	}
 
 	if (num_clips && clips_ptr) {
 		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			goto out_err1;
 		}
 		clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS,
 		    M_WAITOK | M_ZERO);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
 
-		ret = copyin(clips_ptr, clips, num_clips * sizeof(*clips));
-		if (ret)
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret) {
+			ret = -EFAULT;
 			goto out_err2;
+		}
 	}
 
 	if (fb->funcs->dirty) {
-		ret = -fb->funcs->dirty(fb, file_priv, flags, r->color,
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
 				       clips, num_clips);
 	} else {
-		ret = ENOSYS;
+		ret = -ENOSYS;
 		goto out_err2;
 	}
 
@@ -2404,17 +2570,12 @@
  */
 void drm_fb_release(struct drm_file *priv)
 {
-#if 1
-	struct drm_device *dev = priv->dev;
-#else
 	struct drm_device *dev = priv->minor->dev;
-#endif
 	struct drm_framebuffer *fb, *tfb;
 
 	sx_xlock(&dev->mode_config.mutex);
 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		list_del(&fb->filp_head);
-		fb->funcs->destroy(fb);
+		drm_framebuffer_remove(fb);
 	}
 	sx_xunlock(&dev->mode_config.mutex);
 }
@@ -2448,7 +2609,7 @@
 		if (connector->encoder->crtc == crtc) {
 			dup_mode = drm_mode_duplicate(dev, mode);
 			if (!dup_mode) {
-				ret = ENOMEM;
+				ret = -ENOMEM;
 				goto out;
 			}
 			list_add_tail(&dup_mode->head, &list);
@@ -2470,6 +2631,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
 
 static int drm_mode_detachmode(struct drm_device *dev,
 			       struct drm_connector *connector,
@@ -2503,6 +2665,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
 
 /**
  * drm_fb_attachmode - Attach a user mode to an connector
@@ -2525,7 +2688,7 @@
 	struct drm_display_mode *mode;
 	struct drm_mode_object *obj;
 	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2579,7 +2742,7 @@
 	struct drm_connector *connector;
 	struct drm_display_mode mode;
 	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2613,15 +2776,20 @@
 
 	property = malloc(sizeof(struct drm_property), DRM_MEM_KMS,
 	    M_WAITOK | M_ZERO);
+	if (!property)
+		return NULL;
 
 	if (num_values) {
 		property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS,
 		    M_WAITOK | M_ZERO);
+		if (!property->values)
+			goto fail;
 	}
 
 	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
 	if (ret)
 		goto fail;
+
 	property->flags = flags;
 	property->num_values = num_values;
 	INIT_LIST_HEAD(&property->enum_blob_list);
@@ -2633,12 +2801,12 @@
 
 	list_add_tail(&property->head, &dev->mode_config.property_list);
 	return property;
-
 fail:
 	free(property->values, DRM_MEM_KMS);
 	free(property, DRM_MEM_KMS);
-	return (NULL);
+	return NULL;
 }
+EXPORT_SYMBOL(drm_property_create);
 
 struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
 					 const char *name,
@@ -2666,7 +2834,36 @@
 
 	return property;
 }
+EXPORT_SYMBOL(drm_property_create_enum);
 
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_BITMASK;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
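
A sketch of how a driver might use the new bitmask constructor.  The
property name and bit assignments here are hypothetical; drm_prop_enum_list
is the same helper struct the enum constructor takes, and each .type doubles
as a bit index (0..63, enforced by drm_property_add_enum() below):

	static const struct drm_prop_enum_list rotation_bits[] = {
		{ 0, "rotate-0"   },
		{ 1, "rotate-90"  },
		{ 2, "rotate-180" },
	};

	static void attach_rotation_prop(struct drm_device *dev,
					 struct drm_plane *plane)
	{
		struct drm_property *prop;

		prop = drm_property_create_bitmask(dev, 0, "rotation",
						   rotation_bits, 3);
		if (prop != NULL)
			/* initial value: bit 0 set, i.e. "rotate-0" */
			drm_object_attach_property(&plane->base, prop, 1 << 0);
	}
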
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
 					 const char *name,
 					 uint64_t min, uint64_t max)
@@ -2684,6 +2881,7 @@
 
 	return property;
 }
+EXPORT_SYMBOL(drm_property_create_range);
 
 int drm_property_add_enum(struct drm_property *property, int index,
 			  uint64_t value, const char *name)
@@ -2690,9 +2888,16 @@
 {
 	struct drm_property_enum *prop_enum;
 
-	if (!(property->flags & DRM_MODE_PROP_ENUM))
+	if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
 		return -EINVAL;
 
+	/*
+	 * Bitmask enum properties have the additional constraint of values
+	 * from 0 to 63
+	 */
+	if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+		return -EINVAL;
+
 	if (!list_empty(&property->enum_blob_list)) {
 		list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 			if (prop_enum->value == value) {
@@ -2705,6 +2910,8 @@
 
 	prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS,
 	    M_WAITOK | M_ZERO);
+	if (!prop_enum)
+		return -ENOMEM;
 
 	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
 	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
@@ -2714,6 +2921,7 @@
 	list_add_tail(&prop_enum->head, &property->enum_blob_list);
 	return 0;
 }
+EXPORT_SYMBOL(drm_property_add_enum);
 
 void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 {
@@ -2730,58 +2938,59 @@
 	list_del(&property->head);
 	free(property, DRM_MEM_KMS);
 }
+EXPORT_SYMBOL(drm_property_destroy);
 
-int drm_connector_attach_property(struct drm_connector *connector,
-			       struct drm_property *property, uint64_t init_val)
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
 {
-	int i;
+	int count = obj->properties->count;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == 0) {
-			connector->property_ids[i] = property->base.id;
-			connector->property_values[i] = init_val;
-			break;
-		}
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		DRM_WARNING("Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	obj->properties->ids[count] = property->base.id;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
 }
+EXPORT_SYMBOL(drm_object_attach_property);
 
-int drm_connector_property_set_value(struct drm_connector *connector,
-				  struct drm_property *property, uint64_t value)
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
 {
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			connector->property_values[i] = value;
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			obj->properties->values[i] = val;
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
+EXPORT_SYMBOL(drm_object_property_set_value);
 
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
 				  struct drm_property *property, uint64_t *val)
 {
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			*val = connector->property_values[i];
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			*val = obj->properties->values[i];
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
+EXPORT_SYMBOL(drm_object_property_get_value);
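
These three functions generalize the old per-connector helpers to any KMS
object that carries a struct drm_object_properties.  The replacement
pattern, sketched (prop, init_val and new_val are assumed to come from
earlier driver code):

	uint64_t val;

	/* before: drm_connector_attach_property(connector, prop, init_val);
	 * after:  operate on the connector's embedded mode object */
	drm_object_attach_property(&connector->base, prop, init_val);
	drm_object_property_set_value(&connector->base, prop, new_val);
	if (drm_object_property_get_value(&connector->base, prop, &val) == 0) {
		/* val == new_val here */
	}
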
 
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv)
@@ -2797,9 +3006,9 @@
 	struct drm_property_enum *prop_enum;
 	struct drm_mode_property_enum __user *enum_ptr;
 	struct drm_property_blob *prop_blob;
-	uint32_t *blob_id_ptr;
-	uint64_t *values_ptr;
-	uint32_t *blob_length_ptr;
+	uint32_t __user *blob_id_ptr;
+	uint64_t __user *values_ptr;
+	uint32_t __user *blob_length_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2812,7 +3021,7 @@
 	}
 	property = obj_to_property(obj);
 
-	if (property->flags & DRM_MODE_PROP_ENUM) {
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
 		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
 			enum_count++;
 	} else if (property->flags & DRM_MODE_PROP_BLOB) {
@@ -2827,9 +3036,9 @@
 	out_resp->flags = property->flags;
 
 	if ((out_resp->count_values >= value_count) && value_count) {
-		values_ptr = (uint64_t *)(uintptr_t)out_resp->values_ptr;
+		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
 		for (i = 0; i < value_count; i++) {
-			if (copyout(&property->values[i], values_ptr + i, sizeof(uint64_t))) {
+			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
 				ret = -EFAULT;
 				goto done;
 			}
@@ -2837,19 +3046,19 @@
 	}
 	out_resp->count_values = value_count;
 
-	if (property->flags & DRM_MODE_PROP_ENUM) {
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
 		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
 			copied = 0;
-			enum_ptr = (struct drm_mode_property_enum *)(uintptr_t)out_resp->enum_blob_ptr;
+			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
 			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
-				if (copyout(&prop_enum->value, &enum_ptr[copied].value, sizeof(uint64_t))) {
+				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
 					ret = -EFAULT;
 					goto done;
 				}
 
-				if (copyout(&prop_enum->name,
-				    &enum_ptr[copied].name,DRM_PROP_NAME_LEN)) {
+				if (copy_to_user(&enum_ptr[copied].name,
+						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
 					ret = -EFAULT;
 					goto done;
 				}
@@ -2862,18 +3071,16 @@
 	if (property->flags & DRM_MODE_PROP_BLOB) {
 		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
 			copied = 0;
-			blob_id_ptr = (uint32_t *)(uintptr_t)out_resp->enum_blob_ptr;
-			blob_length_ptr = (uint32_t *)(uintptr_t)out_resp->values_ptr;
+			blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
 			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
-				if (copyout(&prop_blob->base.id,
-				    blob_id_ptr + copied, sizeof(uint32_t))) {
+				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
 					ret = -EFAULT;
 					goto done;
 				}
 
-				if (copyout(&prop_blob->length,
-				    blob_length_ptr + copied, sizeof(uint32_t))) {
+				if (put_user(prop_blob->length, blob_length_ptr + copied)) {
 					ret = -EFAULT;
 					goto done;
 				}
@@ -2897,13 +3104,15 @@
 	if (!length || !data)
 		return NULL;
 
-	blob = malloc(sizeof(struct drm_property_blob) + length, DRM_MEM_KMS,
+	blob = malloc(sizeof(struct drm_property_blob)+length, DRM_MEM_KMS,
 	    M_WAITOK | M_ZERO);
+	if (!blob)
+		return NULL;
 
 	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
 	if (ret) {
 		free(blob, DRM_MEM_KMS);
-		return (NULL);
+		return NULL;
 	}
 
 	blob->length = length;
@@ -2929,7 +3138,7 @@
 	struct drm_mode_get_blob *out_resp = data;
 	struct drm_property_blob *blob;
 	int ret = 0;
-	void *blob_ptr;
+	void __user *blob_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2943,8 +3152,8 @@
 	blob = obj_to_blob(obj);
 
 	if (out_resp->length == blob->length) {
-		blob_ptr = (void *)(unsigned long)out_resp->data;
-		if (copyout(blob->data, blob_ptr, blob->length)){
+		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+		if (copy_to_user(blob_ptr, blob->data, blob->length)){
 			ret = -EFAULT;
 			goto done;
 		}
@@ -2960,7 +3169,7 @@
 					    struct edid *edid)
 {
 	struct drm_device *dev = connector->dev;
-	int ret = 0, size;
+	int ret, size;
 
 	if (connector->edid_blob_ptr)
 		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2968,7 +3177,7 @@
 	/* Delete the EDID when there is none. */
 	if (!edid) {
 		connector->edid_blob_ptr = NULL;
-		ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+		ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
 		return ret;
 	}
 
@@ -2975,23 +3184,121 @@
 	size = EDID_LENGTH * (1 + edid->extensions);
 	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
 							    size, edid);
+	if (!connector->edid_blob_ptr)
+		return -EINVAL;
 
-	ret = drm_connector_property_set_value(connector,
+	ret = drm_object_property_set_value(&connector->base,
 					       dev->mode_config.edid_property,
 					       connector->edid_blob_ptr->base.id);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
+static bool drm_property_change_is_valid(struct drm_property *property,
+					 uint64_t value)
+{
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+	if (property->flags & DRM_MODE_PROP_RANGE) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else if (property->flags & DRM_MODE_PROP_BITMASK) {
+		int i;
+		uint64_t valid_mask = 0;
+		for (i = 0; i < property->num_values; i++)
+			valid_mask |= (1ULL << property->values[i]);
+		return !(value & ~valid_mask);
+	} else if (property->flags & DRM_MODE_PROP_BLOB) {
+		/* Only the driver knows */
+		return true;
+	} else {
+		int i;
+		for (i = 0; i < property->num_values; i++)
+			if (property->values[i] == value)
+				return true;
+		return false;
+	}
+}
+
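
The bitmask branch above collects every legal bit into valid_mask and
rejects any value with stray bits set.  Worked example for a property
registered with bit indices 0, 1 and 2:

	/* values[] = {0, 1, 2} -> valid_mask = 0x1 | 0x2 | 0x4 = 0x7
	 *
	 * value 0x5 (bits 0+2): 0x5 & ~0x7 == 0x0 -> accepted
	 * value 0x9 (bits 0+3): 0x9 & ~0x7 == 0x8 -> rejected
	 */
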
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 				       void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_connector_set_property *out_resp = data;
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+					   struct drm_property *property,
+					   uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		if (connector->funcs->dpms)
+			(*connector->funcs->dpms)(connector, (int)value);
+		ret = 0;
+	} else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_object_property_set_value(&connector->base, property, value);
+	return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_crtc *crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->set_property)
+		ret = crtc->funcs->set_property(crtc, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_plane *plane = obj_to_plane(obj);
+
+	if (plane->funcs->set_property)
+		ret = plane->funcs->set_property(plane, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
 	struct drm_mode_object *obj;
-	struct drm_property *property;
-	struct drm_connector *connector;
-	int ret = -EINVAL;
+	int ret = 0;
 	int i;
+	int copied = 0;
+	int props_count = 0;
+	uint32_t __user *props_ptr;
+	uint64_t __user *prop_values_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2998,60 +3305,95 @@
 
 	sx_xlock(&dev->mode_config.mutex);
 
-	obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
+		ret = -EINVAL;
 		goto out;
 	}
-	connector = obj_to_connector(obj);
+	if (!obj->properties) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == out_resp->prop_id)
-			break;
+	props_count = obj->properties->count;
+
+	/* This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it. */
+	if ((arg->count_props >= props_count) && props_count) {
+		copied = 0;
+		props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+		prop_values_ptr = (uint64_t __user *)(unsigned long)
+				  (arg->prop_values_ptr);
+		for (i = 0; i < props_count; i++) {
+			if (put_user(obj->properties->ids[i],
+				     props_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			if (put_user(obj->properties->values[i],
+				     prop_values_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
 	}
+	arg->count_props = props_count;
+out:
+	sx_xunlock(&dev->mode_config.mutex);
+	return ret;
+}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_mode_object *prop_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	sx_xlock(&dev->mode_config.mutex);
+
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj)
 		goto out;
-	}
+	if (!arg_obj->properties)
+		goto out;
 
-	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
-	if (!obj) {
+	for (i = 0; i < arg_obj->properties->count; i++)
+		if (arg_obj->properties->ids[i] == arg->prop_id)
+			break;
+
+	if (i == arg_obj->properties->count)
 		goto out;
-	}
-	property = obj_to_property(obj);
 
-	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+	prop_obj = drm_mode_object_find(dev, arg->prop_id,
+					DRM_MODE_OBJECT_PROPERTY);
+	if (!prop_obj)
 		goto out;
+	property = obj_to_property(prop_obj);
 
-	if (property->flags & DRM_MODE_PROP_RANGE) {
-		if (out_resp->value < property->values[0])
-			goto out;
+	if (!drm_property_change_is_valid(property, arg->value))
+		goto out;
 
-		if (out_resp->value > property->values[1])
-			goto out;
-	} else {
-		int found = 0;
-		for (i = 0; i < property->num_values; i++) {
-			if (property->values[i] == out_resp->value) {
-				found = 1;
-				break;
-			}
-		}
-		if (!found) {
-			goto out;
-		}
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+		break;
 	}
 
-	/* Do DPMS ourselves */
-	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int) out_resp->value);
-		ret = 0;
-	} else if (connector->funcs->set_property)
-		ret = connector->funcs->set_property(connector, property, out_resp->value);
-
-	/* store the property value if successful */
-	if (!ret)
-		drm_connector_property_set_value(connector, property, out_resp->value);
 out:
 	sx_xunlock(&dev->mode_config.mutex);
 	return ret;
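
Since the ioctl dispatches on obj_type, one entry point now covers
connector, CRTC and plane properties alike.  A userspace sketch (fd,
plane_id and prop_id are assumed valid; where the DRM_MODE_OBJECT_*
constants live on the userspace side varies by platform and era, so treat
the includes as illustrative):

	#include <xf86drm.h>
	#include <drm/drm_mode.h>

	struct drm_mode_obj_set_property req = {
		.value    = 1 << 1,	/* e.g. a bitmask property value */
		.prop_id  = prop_id,
		.obj_id   = plane_id,
		.obj_type = DRM_MODE_OBJECT_PLANE,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &req) != 0) {
		/* EINVAL: unknown ids, a property not attached to the
		 * object, or a value drm_property_change_is_valid() rejects */
	}
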
@@ -3070,6 +3412,7 @@
 	}
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
 
 void drm_mode_connector_detach_encoder(struct drm_connector *connector,
 				    struct drm_encoder *encoder)
@@ -3084,6 +3427,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
 
 int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 				  int gamma_size)
@@ -3092,9 +3436,14 @@
 
 	crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
 	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	if (!crtc->gamma_store) {
+		crtc->gamma_size = 0;
+		return -ENOMEM;
+	}
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
 
 int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 			     void *data, struct drm_file *file_priv)
@@ -3117,6 +3466,11 @@
 	}
 	crtc = obj_to_crtc(obj);
 
+	if (crtc->funcs->gamma_set == NULL) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
 	/* memcpy into gamma store */
 	if (crtc_lut->gamma_size != crtc->gamma_size) {
 		ret = -EINVAL;
@@ -3125,19 +3479,19 @@
 
 	size = crtc_lut->gamma_size * (sizeof(uint16_t));
 	r_base = crtc->gamma_store;
-	if (copyin((void *)(uintptr_t)crtc_lut->red, r_base, size)) {
+	if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
 
 	g_base = (char *)r_base + size;
-	if (copyin((void *)(uintptr_t)crtc_lut->green, g_base, size)) {
+	if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
 
 	b_base = (char *)g_base + size;
-	if (copyin((void *)(uintptr_t)crtc_lut->blue, b_base, size)) {
+	if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3179,19 +3533,19 @@
 
 	size = crtc_lut->gamma_size * (sizeof(uint16_t));
 	r_base = crtc->gamma_store;
-	if (copyout(r_base, (void *)(uintptr_t)crtc_lut->red, size)) {
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
 
 	g_base = (char *)r_base + size;
-	if (copyout(g_base, (void *)(uintptr_t)crtc_lut->green, size)) {
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
 
 	b_base = (char *)g_base + size;
-	if (copyout(b_base, (void *)(uintptr_t)crtc_lut->blue, size)) {
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
 		ret = -EFAULT;
 		goto out;
 	}
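
Both the set and get paths rely on the layout drm_mode_crtc_set_gamma_size()
allocates: a single contiguous block holding the three channel ramps back to
back.

	/* gamma_store layout, with size = gamma_size * sizeof(uint16_t):
	 *
	 *   r_base = crtc->gamma_store
	 *   g_base = r_base + size
	 *   b_base = g_base + size
	 *
	 * e.g. a 256-entry LUT occupies 3 * 256 * 2 = 1536 bytes.
	 */
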
@@ -3201,14 +3555,14 @@
 }
 
 static void
-drm_kms_free(void *arg)
+free_vblank_event(void *arg)
 {
 
 	free(arg, DRM_MEM_KMS);
 }
 
-int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
 {
 	struct drm_mode_crtc_page_flip *page_flip = data;
 	struct drm_mode_object *obj;
@@ -3215,11 +3569,15 @@
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb;
 	struct drm_pending_vblank_event *e = NULL;
-	int ret = EINVAL;
+#ifdef __linux__
+	unsigned long flags;
+#endif
+	int hdisplay, vdisplay;
+	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
 	    page_flip->reserved != 0)
-		return (EINVAL);
+		return -EINVAL;
 
 	sx_xlock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -3232,7 +3590,7 @@
 		 * due to a hotplug event, that userspace has not
 		 * yet discovered.
 		 */
-		ret = EBUSY;
+		ret = -EBUSY;
 		goto out;
 	}
 
@@ -3244,20 +3602,29 @@
 		goto out;
 	fb = obj_to_fb(obj);
 
-	if (crtc->mode.hdisplay > fb->width ||
-	    crtc->mode.vdisplay > fb->height ||
-	    crtc->x > fb->width - crtc->mode.hdisplay ||
-	    crtc->y > fb->height - crtc->mode.vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
-			      fb->width, fb->height,
-			      crtc->mode.hdisplay, crtc->mode.vdisplay,
-			      crtc->x, crtc->y);
-		ret = ENOSPC;
+	hdisplay = crtc->mode.hdisplay;
+	vdisplay = crtc->mode.vdisplay;
+
+	if (crtc->invert_dimensions) {
+		int tmp;
+		tmp = vdisplay;
+		vdisplay = hdisplay;
+		hdisplay = tmp;
+	}
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    crtc->x > fb->width - hdisplay ||
+	    crtc->y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		ret = -ENOSPC;
 		goto out;
 	}
 
 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		ret = ENOMEM;
+		ret = -ENOMEM;
 		mtx_lock(&dev->event_lock);
 		if (file_priv->event_space < sizeof e->event) {
 			mtx_unlock(&dev->event_lock);
@@ -3267,6 +3634,12 @@
 		mtx_unlock(&dev->event_lock);
 
 		e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+		if (e == NULL) {
+			mtx_lock(&dev->event_lock);
+			file_priv->event_space += sizeof e->event;
+			mtx_unlock(&dev->event_lock);
+			goto out;
+		}
 
 		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 		e->event.base.length = sizeof e->event;
@@ -3274,11 +3647,11 @@
 		e->base.event = &e->event.base;
 		e->base.file_priv = file_priv;
 		e->base.destroy =
-			(void (*) (struct drm_pending_event *))drm_kms_free;
+			(void (*) (struct drm_pending_event *)) free_vblank_event;
 	}
 
-	ret = -crtc->funcs->page_flip(crtc, fb, e);
-	if (ret != 0) {
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
 		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 			mtx_lock(&dev->event_lock);
 			file_priv->event_space += sizeof e->event;
@@ -3289,9 +3662,7 @@
 
 out:
 	sx_xunlock(&dev->mode_config.mutex);
-	CTR3(KTR_DRM, "page_flip_ioctl %d %d %d", curproc->p_pid,
-	    page_flip->crtc_id, ret);
-	return (ret);
+	return ret;
 }
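
When DRM_MODE_PAGE_FLIP_EVENT is set, completion is reported as a
DRM_EVENT_FLIP_COMPLETE record that userspace reads from the device fd.  A
minimal consumer, sketched (the fd is assumed open with a flip already
queued; the include path follows the Linux/libdrm layout):

	#include <poll.h>
	#include <string.h>
	#include <unistd.h>
	#include <drm/drm.h>	/* struct drm_event{,_vblank} */

	static void wait_for_flip(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		char buf[1024];
		ssize_t n, off;

		if (poll(&pfd, 1, -1) <= 0)
			return;
		n = read(fd, buf, sizeof(buf));

		/* The fd yields packed, variable-length drm_event records. */
		for (off = 0; off + (ssize_t)sizeof(struct drm_event) <= n;) {
			struct drm_event ev;

			memcpy(&ev, buf + off, sizeof(ev));
			if (ev.type == DRM_EVENT_FLIP_COMPLETE) {
				struct drm_event_vblank vbl;

				memcpy(&vbl, buf + off, sizeof(vbl));
				/* vbl.user_data is the ioctl's user_data */
			}
			off += ev.length;
		}
	}
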
 
 void drm_mode_config_reset(struct drm_device *dev)
@@ -3308,10 +3679,14 @@
 		if (encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		connector->status = connector_status_unknown;
+
 		if (connector->funcs->reset)
 			connector->funcs->reset(connector);
+	}
 }
+EXPORT_SYMBOL(drm_mode_config_reset);
 
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv)
@@ -3319,7 +3694,7 @@
 	struct drm_mode_create_dumb *args = data;
 
 	if (!dev->driver->dumb_create)
-		return -ENOTSUP;
+		return -ENOSYS;
 	return dev->driver->dumb_create(file_priv, dev, args);
 }
 
@@ -3330,7 +3705,7 @@
 
 	/* call driver ioctl to get mmap offset */
 	if (!dev->driver->dumb_map_offset)
-		return -ENOTSUP;
+		return -ENOSYS;
 
 	return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
 }
@@ -3341,7 +3716,7 @@
 	struct drm_mode_destroy_dumb *args = data;
 
 	if (!dev->driver->dumb_destroy)
-		return -ENOTSUP;
+		return -ENOSYS;
 
 	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
@@ -3354,6 +3729,7 @@
 			  int *bpp)
 {
 	switch (format) {
+	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB332:
 	case DRM_FORMAT_BGR233:
 		*depth = 8;
@@ -3412,3 +3788,145 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+	unsigned int depth;
+	int bpp;
+
+	if (plane >= drm_format_num_planes(format))
+		return 0;
+
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		return 2;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		return plane ? 2 : 1;
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 1;
+	default:
+		drm_fb_get_bpp_depth(format, &depth, &bpp);
+		return bpp >> 3;
+	}
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
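
Taken together these helpers let framebuffer_check() and drivers size
multi-planar buffers without per-format special cases.  A sketch for a
640x480 NV12 allocation (in-kernel usage; dimensions illustrative):

	uint32_t format = DRM_FORMAT_NV12;
	int w = 640, h = 480;
	int hsub = drm_format_horz_chroma_subsampling(format);	/* 2 */
	int vsub = drm_format_vert_chroma_subsampling(format);	/* 2 */
	int nplanes = drm_format_num_planes(format);		/* 2 */

	size_t y_bytes  = (size_t)w * h *
			  drm_format_plane_cpp(format, 0);	/* 307200 */
	size_t uv_bytes = (size_t)(w / hsub) * (h / vsub) *
			  drm_format_plane_cpp(format, 1);	/* 153600 */
	/* 460800 bytes total for a tightly packed buffer (nplanes == 2) */
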

Modified: trunk/sys/dev/drm2/drm_crtc.h
===================================================================
--- trunk/sys/dev/drm2/drm_crtc.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_crtc.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -23,19 +23,21 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_crtc.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_crtc.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 #ifndef __DRM_CRTC_H__
 #define __DRM_CRTC_H__
 
-#include <dev/drm2/drm_gem_names.h>
+#include <dev/drm2/drm_mode.h>
+
 #include <dev/drm2/drm_fourcc.h>
 
 struct drm_device;
 struct drm_mode_set;
 struct drm_framebuffer;
-struct i2c_adapter;
+struct drm_object_properties;
 
+
 #define DRM_MODE_OBJECT_CRTC 0xcccccccc
 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
 #define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
@@ -48,8 +50,16 @@
 struct drm_mode_object {
 	uint32_t id;
 	uint32_t type;
+	struct drm_object_properties *properties;
 };
 
+#define DRM_OBJECT_MAX_PROPERTY 24
+struct drm_object_properties {
+	int count;
+	uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
 /*
  * Note on terminology:  here, for brevity and convenience, we refer to connector
  * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
@@ -65,7 +75,7 @@
     MODE_H_ILLEGAL,	/* mode has illegal horizontal timings */
     MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
     MODE_BAD_WIDTH,	/* requires an unsupported linepitch */
-    MODE_NOMODE,	/* no mode with a maching name */
+    MODE_NOMODE,	/* no mode with a matching name */
     MODE_NO_INTERLACE,	/* interlaced mode not supported */
     MODE_NO_DBLESCAN,	/* doublescan mode not supported */
     MODE_NO_VSCAN,	/* multiscan mode not supported */
@@ -107,7 +117,8 @@
 	.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
 	.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
 	.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-	.vscan = (vs), .flags = (f), .vrefresh = 0
+	.vscan = (vs), .flags = (f), .vrefresh = 0, \
+	.base.type = DRM_MODE_OBJECT_MODE
 
 #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 
@@ -118,9 +129,8 @@
 
 	char name[DRM_DISPLAY_MODE_LEN];
 
-	int connector_count;
 	enum drm_mode_status status;
-	int type;
+	unsigned int type;
 
 	/* Proposed mode values */
 	int clock;		/* in kHz */
@@ -156,8 +166,6 @@
 	int crtc_vsync_start;
 	int crtc_vsync_end;
 	int crtc_vtotal;
-	int crtc_hadjusted;
-	int crtc_vadjusted;
 
 	/* Driver private mode info */
 	int private_size;
@@ -206,11 +214,10 @@
 	u32 color_formats;
 
 	u8 cea_rev;
-
-	char *raw_edid; /* if any */
 };
 
 struct drm_framebuffer_funcs {
+	/* note: use drm_framebuffer_remove() */
 	void (*destroy)(struct drm_framebuffer *framebuffer);
 	int (*create_handle)(struct drm_framebuffer *fb,
 			     struct drm_file *file_priv,
@@ -235,6 +242,16 @@
 
 struct drm_framebuffer {
 	struct drm_device *dev;
+	/*
+	 * Note that the fb is refcounted for the benefit of driver internals,
+	 * for example on some hw disabling a CRTC/plane is asynchronous, and
+	 * scanout does not actually complete until the next vblank.  So some
+	 * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+	 * should be deferred.  In cases like this, the driver would like to
+	 * hold a ref to the fb even though it has already been removed from
+	 * the userspace perspective.
+	 */
+	unsigned int refcount;
 	struct list_head head;
 	struct drm_mode_object base;
 	const struct drm_framebuffer_funcs *funcs;
@@ -284,19 +301,16 @@
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
  * @reset: reset CRTC after state has been invalidate (e.g. resume)
- * @dpms: control display power levels
- * @save: save CRTC state
- * @resore: restore CRTC state
- * @lock: lock the CRTC
- * @unlock: unlock the CRTC
- * @shadow_allocate: allocate shadow pixmap
- * @shadow_create: create shadow pixmap for rotation support
- * @shadow_destroy: free shadow pixmap
- * @mode_fixup: fixup proposed mode
- * @mode_set: set the desired mode on the CRTC
+ * @cursor_set: setup the cursor
+ * @cursor_move: move the cursor
  * @gamma_set: specify color ramp for CRTC
- * @destroy: deinit and free object.
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
  *
  * The drm_crtc_funcs structure is the central CRTC management structure
  * in the DRM.  Each CRTC controls one or more connectors (note that the name
@@ -330,7 +344,7 @@
 
 	/*
 	 * Flip to the given framebuffer.  This implements the page
-	 * flip ioctl descibed in drm_mode.h, specifically, the
+	 * flip ioctl described in drm_mode.h, specifically, the
 	 * implementation must return immediately and block all
 	 * rendering to the current fb until the flip has completed.
 	 * If userspace set the event flag in the ioctl, the event
@@ -340,14 +354,32 @@
 	int (*page_flip)(struct drm_crtc *crtc,
 			 struct drm_framebuffer *fb,
 			 struct drm_pending_vblank_event *event);
+
+	int (*set_property)(struct drm_crtc *crtc,
+			    struct drm_property *property, uint64_t val);
 };
 
 /**
  * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
  * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ *    invert the width/height of the crtc.  This is used if the driver
+ *    is performing 90 or 270 degree rotated scanout
  * @x: x position on screen
  * @y: y position on screen
  * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
@@ -371,6 +403,8 @@
 	 */
 	struct drm_display_mode hwmode;
 
+	bool invert_dimensions;
+
 	int x, y;
 	const struct drm_crtc_funcs *funcs;
 
@@ -379,10 +413,12 @@
 	uint16_t *gamma_store;
 
 	/* Constants needed for precise vblank and swap timestamping. */
-	int64_t framedur_ns, linedur_ns, pixeldur_ns;
+	s64 framedur_ns, linedur_ns, pixeldur_ns;
 
 	/* if you are using the helper */
 	void *helper_private;
+
+	struct drm_object_properties properties;
 };
 
 
@@ -392,11 +428,8 @@
  * @save: save connector state
  * @restore: restore connector state
  * @reset: reset connector after state has been invalidate (e.g. resume)
- * @mode_valid: is this mode valid on the given connector?
- * @mode_fixup: try to fixup proposed mode for this connector
- * @mode_set: set this mode
  * @detect: is this connector active?
- * @get_modes: get mode list for this connector
+ * @fill_modes: fill mode list for this connector
  * @set_property: property for this connector may need update
  * @destroy: make object go away
  * @force: notify the driver the connector is forced on
@@ -426,6 +459,13 @@
 	void (*force)(struct drm_connector *connector);
 };
 
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
 struct drm_encoder_funcs {
 	void (*reset)(struct drm_encoder *encoder);
 	void (*destroy)(struct drm_encoder *encoder);
@@ -432,12 +472,23 @@
 };
 
 #define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_MAX_PROPERTY 16
 #define DRM_CONNECTOR_LEN 32
-#define DRM_CONNECTOR_MAX_ENCODER 2
+#define DRM_CONNECTOR_MAX_ENCODER 3
 
 /**
  * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
  */
 struct drm_encoder {
 	struct drm_device *dev;
@@ -473,14 +524,36 @@
 
 /**
  * drm_connector - central DRM connector control structure
- * @crtc: CRTC this connector is currently connected to, NULL if none
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
- * @available_modes: modes available on this connector (from get_modes() + user)
- * @initial_x: initial x position for this connector
- * @initial_y: initial y position for this connector
- * @status: connector connected?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
  * @funcs: connector control functions
+ * @user_modes: user added mode list
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -489,7 +562,9 @@
  */
 struct drm_connector {
 	struct drm_device *dev;
-	/* struct device kdev; XXXKIB */
+#ifdef FREEBSD_NOTYET
+	struct device kdev;
+#endif /* FREEBSD_NOTYET */
 	struct device_attribute *attr;
 	struct list_head head;
 
@@ -501,7 +576,6 @@
 	bool doublescan_allowed;
 	struct list_head modes; /* list of modes on this connector */
 
-	int initial_x, initial_y;
 	enum drm_connector_status status;
 
 	/* these are modes added by probing with DDC or the BIOS */
@@ -512,8 +586,7 @@
 
 	struct list_head user_modes;
 	struct drm_property_blob *edid_blob_ptr;
-	u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
-	uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+	struct drm_object_properties properties;
 
 	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
@@ -525,7 +598,6 @@
 	/* forced on connector */
 	enum drm_connector_force force;
 	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-	uint32_t force_encoder_id;
 	struct drm_encoder *encoder; /* currently active encoder */
 
 	/* EDID bits */
@@ -535,8 +607,8 @@
 	bool latency_present[2];
 	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
 	int audio_latency[2];
-
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+	unsigned bad_edid_counter;
 };
 
 /**
@@ -544,6 +616,7 @@
  * @update_plane: update the plane configuration
  * @disable_plane: shut down the plane
  * @destroy: clean up plane resources
+ * @set_property: called when a property is changed
  */
 struct drm_plane_funcs {
 	int (*update_plane)(struct drm_plane *plane,
@@ -554,6 +627,9 @@
 			    uint32_t src_w, uint32_t src_h);
 	int (*disable_plane)(struct drm_plane *plane);
 	void (*destroy)(struct drm_plane *plane);
+
+	int (*set_property)(struct drm_plane *plane,
+			    struct drm_property *property, uint64_t val);
 };
 
 /**
@@ -571,6 +647,7 @@
  * @enabled: enabled flag
  * @funcs: helper functions
  * @helper_private: storage for driver layer
+ * @properties: property tracking for this plane
  */
 struct drm_plane {
 	struct drm_device *dev;
@@ -593,10 +670,20 @@
 
 	const struct drm_plane_funcs *funcs;
 	void *helper_private;
+
+	struct drm_object_properties properties;
 };
 
 /**
- * struct drm_mode_set
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
  *
  * Represents a single CRTC, the connectors it drives, the mode in use, and
  * the framebuffer it scans out from.
@@ -604,8 +691,6 @@
  * This is used to set modes.
  */
 struct drm_mode_set {
-	struct list_head head;
-
 	struct drm_framebuffer *fb;
 	struct drm_crtc *crtc;
 	struct drm_display_mode *mode;
@@ -618,15 +703,34 @@
 };
 
 /**
- * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc.) mode setting functions that
+ * involve drivers.
  */
 struct drm_mode_config_funcs {
 	int (*fb_create)(struct drm_device *dev,
-	    struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd,
-	    struct drm_framebuffer **res);
+					     struct drm_file *file_priv,
+					     struct drm_mode_fb_cmd2 *mode_cmd,
+					     struct drm_framebuffer **fb);
 	void (*output_poll_changed)(struct drm_device *dev);
 };
 
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state.  But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
 struct drm_mode_group {
 	uint32_t num_crtcs;
 	uint32_t num_encoders;
@@ -638,11 +742,34 @@
 
 /**
  * drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
  *
+ * Core mode resource tracking structure.  All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties.  Some
+ * global restrictions are also here, e.g. dimension restrictions.
  */
 struct drm_mode_config {
 	struct sx mutex; /* protects configuration (mode lists etc.) */
-	struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes */
+	struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
 	/* this is limited to one for now */
 	int num_fb;
 	struct list_head fb_list;
@@ -660,12 +787,13 @@
 
 	int min_width, min_height;
 	int max_width, max_height;
-	struct drm_mode_config_funcs *funcs;
+	const struct drm_mode_config_funcs *funcs;
 	resource_size_t fb_base;
 
 	/* output poll support */
 	bool poll_enabled;
-	struct timeout_task output_poll_task;
+	bool poll_running;
+	struct timeout_task output_poll_work;
 
 	/* pointers to standard properties */
 	struct list_head property_blob_list;
@@ -714,16 +842,6 @@
 	char *name;
 };
 
-#if defined(MODE_SETTING_LOCKING_IS_NOT_BROKEN)
-#define	DRM_MODE_CONFIG_ASSERT_LOCKED(dev) \
-	sx_assert(&dev->mode_config.mutex, SA_XLOCKED)
-#else
-#define	DRM_MODE_CONFIG_ASSERT_LOCKED(dev)
-#endif
-
-extern char *drm_get_dirty_info_name(int val);
-extern char *drm_get_connector_status_name(enum drm_connector_status status);
-
 extern int drm_crtc_init(struct drm_device *dev,
 			 struct drm_crtc *crtc,
 			 const struct drm_crtc_funcs *funcs);
@@ -735,6 +853,8 @@
 			      int connector_type);
 
 extern void drm_connector_cleanup(struct drm_connector *connector);
+/* helper to unplug all connectors from sysfs for device */
+extern void drm_connector_unplug_all(struct drm_device *dev);
 
 extern int drm_encoder_init(struct drm_device *dev,
 			    struct drm_encoder *encoder,
@@ -752,28 +872,34 @@
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
 extern char *drm_get_connector_name(struct drm_connector *connector);
+extern char *drm_get_connector_status_name(enum drm_connector_status status);
 extern char *drm_get_dpms_name(int val);
 extern char *drm_get_dvi_i_subconnector_name(int val);
 extern char *drm_get_dvi_i_select_name(int val);
 extern char *drm_get_tv_subconnector_name(int val);
+extern char *drm_get_dirty_info_name(int val);
 extern char *drm_get_tv_select_name(int val);
 extern void drm_fb_release(struct drm_file *file_priv);
+extern int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group);
+extern void drm_mode_group_free(struct drm_mode_group *group);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern bool drm_probe_ddc(device_t adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 device_t adapter);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
-    const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+						   const struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for use by the fb module */
 extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -789,8 +915,8 @@
 				   struct list_head *mode_list,
 				   int maxX, int maxY, int maxPitch);
 extern void drm_mode_validate_clocks(struct drm_device *dev,
-			      struct list_head *mode_list,
-			      int *min, int *max, int n_ranges);
+				     struct list_head *mode_list,
+				     int *min, int *max, int n_ranges);
 extern void drm_mode_prune_invalid(struct drm_device *dev,
 				   struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
@@ -801,10 +927,10 @@
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 						struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
 					 struct drm_property *property,
-					 uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
+					 uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
 					 struct drm_property *property,
 					 uint64_t *value);
 extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
@@ -813,6 +939,9 @@
 extern int drm_framebuffer_init(struct drm_device *dev,
 				struct drm_framebuffer *fb,
 				const struct drm_framebuffer_funcs *funcs);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
 extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
 extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
 extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
@@ -819,8 +948,9 @@
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern int drm_connector_attach_property(struct drm_connector *connector,
-				      struct drm_property *property, uint64_t init_val);
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+				       struct drm_property *property,
+				       uint64_t init_val);
 extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 						const char *name, int num_values);
 extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
@@ -827,6 +957,10 @@
 					 const char *name,
 					 const struct drm_prop_enum_list *props,
 					 int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values);
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
 					 const char *name,
 					 uint64_t min, uint64_t max);
@@ -902,6 +1036,8 @@
 				    void *data, struct drm_file *file_priv);
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
+extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -918,11 +1054,14 @@
 				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
 extern bool drm_edid_is_valid(struct edid *edid);
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-					   int hsize, int vsize, int fresh);
+					   int hsize, int vsize, int fresh,
+					   bool rb);
 
 extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
 				      void *data, struct drm_file *file_priv);
@@ -930,7 +1069,16 @@
 				    void *data, struct drm_file *file_priv);
 extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 				      void *data, struct drm_file *file_priv);
+extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
+extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+					   struct drm_file *file_priv);
 
 extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 				 int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+
 #endif /* __DRM_CRTC_H__ */
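
Usage note: the property entry points above move from connector-specific
helpers to object-generic ones keyed off struct drm_mode_object, with
per-object bookkeeping in struct drm_object_properties.  A minimal sketch of
the new calling convention, assuming a hypothetical "overscan" range property
(the name, range and function are illustrative, not part of this commit):

static int
example_attach_overscan(struct drm_device *dev, struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "overscan", 0, 100);
	if (prop == NULL)
		return (-ENOMEM);

	/* Old API: drm_connector_attach_property(connector, prop, 0); */
	drm_object_attach_property(&connector->base, prop, 0);
	return (0);
}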

Modified: trunk/sys/dev/drm2/drm_crtc_helper.c
===================================================================
--- trunk/sys/dev/drm2/drm_crtc_helper.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_crtc_helper.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -31,50 +31,56 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_crtc_helper.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_crtc_helper.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-#include <sys/param.h>
-#include <sys/systm.h>
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm_crtc.h>
 #include <dev/drm2/drm_fourcc.h>
 #include <dev/drm2/drm_crtc_helper.h>
 #include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/drm_edid.h>
 
-bool
-drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
-    struct drm_cmdline_mode *cmdline_mode)
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * 						connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
 {
-	char *tun_var_name, *tun_mode;
-	static const char tun_prefix[] = "drm_mode.";
-	bool res;
+	struct drm_connector *connector, *tmp;
+	struct list_head panel_list;
 
-	res = false;
-	tun_var_name = malloc(sizeof(tun_prefix) +
-	    strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK);
-	strcpy(tun_var_name, tun_prefix);
-	strcat(tun_var_name, drm_get_connector_name(connector));
-	tun_mode = getenv(tun_var_name);
-	if (tun_mode != NULL) {
-		res = drm_mode_parse_command_line_for_connector(tun_mode,
-		    connector, cmdline_mode);
-		freeenv(tun_mode);
+	INIT_LIST_HEAD(&panel_list);
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			list_move_tail(&connector->head, &panel_list);
 	}
-	free(tun_var_name, M_TEMP);
-	return (res);
+
+	list_splice(&panel_list, &dev->mode_config.connector_list);
 }
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
 
 static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
 static void drm_mode_validate_flag(struct drm_connector *connector,
 				   int flags)
 {
-	struct drm_display_mode *mode, *t;
+	struct drm_display_mode *mode;
 
 	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
 		return;
 
-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
+	list_for_each_entry(mode, &connector->modes, head) {
 		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
 				!(flags & DRM_MODE_FLAG_INTERLACE))
 			mode->status = MODE_NO_INTERLACE;
@@ -88,7 +94,7 @@
 
 /**
  * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
  * @maxX: max width for modes
  * @maxY: max height for modes
  *
@@ -95,16 +101,15 @@
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them.  Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector, try to detect all
+ * valid modes.  Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
  *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
  *
- * FIXME: take into account monitor limits
- *
  * RETURNS:
  * Number of modes found on @connector.
  */
@@ -112,10 +117,9 @@
 					    uint32_t maxX, uint32_t maxY)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_display_mode *mode, *t;
+	struct drm_display_mode *mode;
 	struct drm_connector_helper_funcs *connector_funcs =
 		connector->helper_private;
-	struct drm_cmdline_mode cmdline_mode;
 	int count = 0;
 	int mode_flags = 0;
 
@@ -122,7 +126,7 @@
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 			drm_get_connector_name(connector));
 	/* set all modes to the unverified state */
-	list_for_each_entry_safe(mode, t, &connector->modes, head)
+	list_for_each_entry(mode, &connector->modes, head)
 		mode->status = MODE_UNVERIFIED;
 
 	if (connector->force) {
@@ -134,9 +138,14 @@
 			connector->funcs->force(connector);
 	} else {
 		connector->status = connector->funcs->detect(connector, true);
-		drm_kms_helper_poll_enable(dev);
 	}
 
+	/* Re-enable polling in case the global poll config changed. */
+	if (drm_kms_helper_poll != dev->mode_config.poll_running)
+		drm_kms_helper_poll_enable(dev);
+
+	dev->mode_config.poll_running = drm_kms_helper_poll;
+
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
 			connector->base.id, drm_get_connector_name(connector));
@@ -144,26 +153,14 @@
 		goto prune;
 	}
 
-	count = (*connector_funcs->get_modes)(connector);
-	if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector,
-	    &cmdline_mode)) {
-		mode = drm_mode_create_from_cmdline_mode(dev,
-		    &cmdline_mode);
-		if (mode != NULL) {
-			DRM_DEBUG_KMS(
-	"[CONNECTOR:%d:%s] found manual override ",
-			    connector->base.id,
-			    drm_get_connector_name(connector));
-			drm_mode_debug_printmodeline(mode);
-			drm_mode_probed_add(connector, mode);
-			count++;
-		} else {
-			DRM_ERROR(
-	"[CONNECTOR:%d:%s] manual override mode: parse error\n",
-			    connector->base.id,
-			    drm_get_connector_name(connector));
-		}
-	}
+#ifdef FREEBSD_NOTYET
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+	count = drm_load_edid_firmware(connector);
+	if (count == 0)
+#endif
+#endif /* FREEBSD_NOTYET */
+		count = (*connector_funcs->get_modes)(connector);
+
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
 	if (count == 0)
@@ -181,7 +178,7 @@
 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
 	drm_mode_validate_flag(connector, mode_flags);
 
-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
+	list_for_each_entry(mode, &connector->modes, head) {
 		if (mode->status == MODE_OK)
 			mode->status = connector_funcs->mode_valid(connector,
 								   mode);
@@ -193,13 +190,14 @@
 	if (list_empty(&connector->modes))
 		return 0;
 
+	list_for_each_entry(mode, &connector->modes, head)
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
 	drm_mode_sort(&connector->modes);
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
 			drm_get_connector_name(connector));
-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
-		mode->vrefresh = drm_mode_vrefresh(mode);
-
+	list_for_each_entry(mode, &connector->modes, head) {
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 		drm_mode_debug_printmodeline(mode);
 	}
@@ -206,6 +204,7 @@
 
 	return count;
 }
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
@@ -228,6 +227,7 @@
 			return true;
 	return false;
 }
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
 
 /**
  * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
@@ -251,6 +251,7 @@
 			return true;
 	return false;
 }
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
 
 static void
 drm_encoder_disable(struct drm_encoder *encoder)
@@ -306,6 +307,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -361,17 +363,24 @@
 }
 
 /**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
  * @crtc: CRTC to program
  * @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
  * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an HDMI link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
  *
  * RETURNS:
  * True if the mode was set successfully, or false otherwise.
@@ -420,11 +429,13 @@
 		encoder_funcs = encoder->helper_private;
 		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
 						      adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
 			goto done;
 		}
 	}
 
 	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
 		goto done;
 	}
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
@@ -496,7 +507,9 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
+
 static int
 drm_crtc_helper_disable(struct drm_crtc *crtc)
 {
@@ -523,20 +536,19 @@
 
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
  *
  * RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
  */
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
@@ -550,7 +562,7 @@
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_mode_set save_set;
-	int ret = 0;
+	int ret;
 	int i;
 
 	DRM_DEBUG_KMS("\n");
@@ -582,12 +594,25 @@
 
 	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
 	 * connector data. */
-	save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc),
-	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	save_crtcs = malloc(dev->mode_config.num_crtc *
+			     sizeof(struct drm_crtc), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!save_crtcs)
+		return -ENOMEM;
+
 	save_encoders = malloc(dev->mode_config.num_encoder *
-	    sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+				sizeof(struct drm_encoder), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!save_encoders) {
+		free(save_crtcs, DRM_MEM_KMS);
+		return -ENOMEM;
+	}
+
 	save_connectors = malloc(dev->mode_config.num_connector *
-	    sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+				sizeof(struct drm_connector), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!save_connectors) {
+		free(save_crtcs, DRM_MEM_KMS);
+		free(save_encoders, DRM_MEM_KMS);
+		return -ENOMEM;
+	}
 
 	/* Copy data. Note that driver private data is not affected.
 	 * Should anything bad happen only the expected state is
@@ -623,6 +648,11 @@
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
@@ -733,7 +763,7 @@
 			for (i = 0; i < set->num_connectors; i++) {
 				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
 					      drm_get_connector_name(set->connectors[i]));
-				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+				set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
 			}
 		}
 		drm_helper_disable_unused_functions(dev);
@@ -785,6 +815,7 @@
 	free(save_crtcs, DRM_MEM_KMS);
 	return ret;
 }
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
 
 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 {
@@ -813,12 +844,14 @@
 }
 
 /**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
  *
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
  */
 void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
@@ -866,6 +899,7 @@
 
 	return;
 }
+EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 				   struct drm_mode_fb_cmd2 *mode_cmd)
@@ -884,6 +918,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
 
 int drm_helper_resume_force_mode(struct drm_device *dev)
 {
@@ -901,7 +936,7 @@
 		ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
 					       crtc->x, crtc->y, crtc->fb);
 
-		if (!ret)
+		if (ret == false)
 			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
 
 		/* Turn off outputs that were already powered off */
@@ -914,13 +949,13 @@
 				encoder_funcs = encoder->helper_private;
 				if (encoder_funcs->dpms)
 					(*encoder_funcs->dpms) (encoder,
-					    drm_helper_choose_encoder_dpms(encoder));
+								drm_helper_choose_encoder_dpms(encoder));
 			}
 
 			crtc_funcs = crtc->helper_private;
 			if (crtc_funcs->dpms)
 				(*crtc_funcs->dpms) (crtc,
-				    drm_helper_choose_crtc_dpms(crtc));
+						     drm_helper_choose_crtc_dpms(crtc));
 		}
 	}
 	/* disable the unused connectors while restoring the modesetting */
@@ -927,11 +962,23 @@
 	drm_helper_disable_unused_functions(dev);
 	return 0;
 }
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
-#define DRM_OUTPUT_POLL_PERIOD (10 * hz)
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+	/* send a uevent + call fbdev */
+#ifdef FREEBSD_NOTYET
+	drm_sysfs_hotplug_event(dev);
+#endif /* FREEBSD_NOTYET */
+	if (dev->mode_config.funcs->output_poll_changed)
+		dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(void *ctx, int pending)
 {
-	struct drm_device *dev;
+	struct drm_device *dev = ctx;
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
 	bool repoll = false, changed = false;
@@ -939,26 +986,25 @@
 	if (!drm_kms_helper_poll)
 		return;
 
-	dev = ctx;
-
 	sx_xlock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
-		/* if this is HPD or polled don't check it -
-		   TV out for instance */
-		if (!connector->polled)
+		/* Ignore forced connectors. */
+		if (connector->force)
 			continue;
 
-		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-		    DRM_CONNECTOR_POLL_DISCONNECT))
-			repoll = true;
+		/* Ignore HPD capable connectors and connectors where we don't
+		 * want any hotplug detection at all for polling. */
+		if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+			continue;
 
+		repoll = true;
+
 		old_status = connector->status;
 		/* if we are connected and don't want to poll for disconnect
 		   skip it */
 		if (old_status == connector_status_connected &&
-		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
-		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
 			continue;
 
 		connector->status = connector->funcs->detect(connector, false);
@@ -972,20 +1018,13 @@
 
 	sx_xunlock(&dev->mode_config.mutex);
 
-	if (changed) {
-#if 0
-		/* send a uevent + call fbdev */
-		drm_sysfs_hotplug_event(dev);
-#endif
-		if (dev->mode_config.funcs->output_poll_changed)
-			dev->mode_config.funcs->output_poll_changed(dev);
-	}
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 
-	if (repoll) {
+	if (repoll)
 		taskqueue_enqueue_timeout(taskqueue_thread,
-		    &dev->mode_config.output_poll_task,
+		    &dev->mode_config.output_poll_work,
 		    DRM_OUTPUT_POLL_PERIOD);
-	}
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,8 +1032,9 @@
 	if (!dev->mode_config.poll_enabled)
 		return;
 	taskqueue_cancel_timeout(taskqueue_thread,
-	    &dev->mode_config.output_poll_task, NULL);
+	    &dev->mode_config.output_poll_work, NULL);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
@@ -1005,40 +1045,63 @@
 		return;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->polled)
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
 			poll = true;
 	}
 
-	if (poll) {
+	if (poll)
 		taskqueue_enqueue_timeout(taskqueue_thread,
-		    &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD);
-	}
+		    &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 void drm_kms_helper_poll_init(struct drm_device *dev)
 {
-
-	TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task,
+	TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_work,
 	    0, output_poll_execute, dev);
 	dev->mode_config.poll_enabled = true;
 
 	drm_kms_helper_poll_enable(dev);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
 	drm_kms_helper_poll_disable(dev);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
 void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
+	struct drm_connector *connector;
+	enum drm_connector_status old_status;
+	bool changed = false;
+
 	if (!dev->mode_config.poll_enabled)
 		return;
 
-	/* kill timer and schedule immediate execution, this doesn't block */
-	taskqueue_cancel_timeout(taskqueue_thread,
-	    &dev->mode_config.output_poll_task, NULL);
-	if (drm_kms_helper_poll)
-		taskqueue_enqueue_timeout(taskqueue_thread,
-		    &dev->mode_config.output_poll_task, 0);
+	sx_xlock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		old_status = connector->status;
+
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      old_status, connector->status);
+		if (old_status != connector->status)
+			changed = true;
+	}
+
+	sx_xunlock(&dev->mode_config.mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 }
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
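
Usage note: drm_helper_hpd_irq_event() now performs connector re-detection
itself and reports changes through drm_kms_helper_hotplug_event(), instead of
merely re-arming the poll task.  A sketch of the expected driver wiring, with
an illustrative handler name:

static void
example_hotplug_intr(struct drm_device *dev)
{
	/*
	 * Assumes init code set connector->polled = DRM_CONNECTOR_POLL_HPD
	 * on connectors with a reliable HPD pin.  The helper re-runs
	 * ->detect() on those connectors and, on any status change, calls
	 * drm_kms_helper_hotplug_event().
	 */
	drm_helper_hpd_irq_event(dev);
}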

Modified: trunk/sys/dev/drm2/drm_crtc_helper.h
===================================================================
--- trunk/sys/dev/drm2/drm_crtc_helper.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_crtc_helper.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -23,7 +23,7 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_crtc_helper.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_crtc_helper.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 
 /*
@@ -41,6 +41,13 @@
 	ENTER_ATOMIC_MODE_SET,
 };
 
+/**
+ * drm_crtc_helper_funcs - helper operations for CRTCs
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
 struct drm_crtc_helper_funcs {
 	/*
 	 * Control power levels on the CRTC.  If the mode passed in is
@@ -52,7 +59,7 @@
 
 	/* Provider can fixup or change mode timings before modeset occurs */
 	bool (*mode_fixup)(struct drm_crtc *crtc,
-			   struct drm_display_mode *mode,
+			   const struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode);
 	/* Actually set the mode */
 	int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -73,6 +80,13 @@
 	void (*disable)(struct drm_crtc *crtc);
 };
 
+/**
+ * drm_encoder_helper_funcs - helper operations for encoders
+ * @mode_fixup: try to fixup proposed mode for this encoder
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
 struct drm_encoder_helper_funcs {
 	void (*dpms)(struct drm_encoder *encoder, int mode);
 	void (*save)(struct drm_encoder *encoder);
@@ -79,7 +93,7 @@
 	void (*restore)(struct drm_encoder *encoder);
 
 	bool (*mode_fixup)(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
+			   const struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode);
 	void (*prepare)(struct drm_encoder *encoder);
 	void (*commit)(struct drm_encoder *encoder);
@@ -94,6 +108,13 @@
 	void (*disable)(struct drm_encoder *encoder);
 };
 
+/**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
 struct drm_connector_helper_funcs {
 	int (*get_modes)(struct drm_connector *connector);
 	int (*mode_valid)(struct drm_connector *connector,
@@ -113,6 +134,8 @@
 
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 					  struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -138,10 +161,8 @@
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
-
-extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
-    struct drm_cmdline_mode *cmdline_mode);
 #endif
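
For orientation, a driver using this mid-layer supplies one such vtable per
object.  A hypothetical connector example follows; the example_* callbacks are
placeholders, and .best_encoder is assumed from the unmodified remainder of
the struct, which this diff truncates:

static int example_get_modes(struct drm_connector *connector);
static int example_mode_valid(struct drm_connector *connector,
    struct drm_display_mode *mode);
static struct drm_encoder *example_best_encoder(struct drm_connector *connector);

static struct drm_connector_helper_funcs example_connector_helpers = {
	.get_modes	= example_get_modes,
	.mode_valid	= example_mode_valid,
	.best_encoder	= example_best_encoder,
};

The table is typically installed at connector init time via
drm_connector_helper_add(), which stores it in connector->helper_private.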

Modified: trunk/sys/dev/drm2/drm_dma.c
===================================================================
--- trunk/sys/dev/drm2/drm_dma.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_dma.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
 /*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith at valinux.com
+ *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,44 +32,49 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_dma.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_dma.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_dma.c
- * Support code for DMA buffer management.
+#include <dev/drm2/drmP.h>
+
+/**
+ * Initialize the DMA data.
  *
- * The implementation used to be significantly more complicated, but the
- * complexity has been moved into the drivers as different buffer management
- * schemes evolved.
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
  */
-
-#include <dev/drm2/drmP.h>
-
 int drm_dma_setup(struct drm_device *dev)
 {
+	int i;
 
 	dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
-	if (dev->dma == NULL)
-		return ENOMEM;
+	if (!dev->dma)
+		return -ENOMEM;
 
-	DRM_SPININIT(&dev->dma_lock, "drmdma");
+	for (i = 0; i <= DRM_MAX_ORDER; i++)
+		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
 
 	return 0;
 }
 
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
 void drm_dma_takedown(struct drm_device *dev)
 {
-	drm_device_dma_t  *dma = dev->dma;
-	int		  i, j;
+	struct drm_device_dma *dma = dev->dma;
+	int i, j;
 
-	if (dma == NULL)
+	if (!dma)
 		return;
 
 	/* Clear dma buffers */
@@ -66,20 +81,23 @@
 	for (i = 0; i <= DRM_MAX_ORDER; i++) {
 		if (dma->bufs[i].seg_count) {
 			DRM_DEBUG("order %d: buf_count = %d,"
-			    " seg_count = %d\n", i, dma->bufs[i].buf_count,
-			    dma->bufs[i].seg_count);
+				  " seg_count = %d\n",
+				  i,
+				  dma->bufs[i].buf_count,
+				  dma->bufs[i].seg_count);
 			for (j = 0; j < dma->bufs[i].seg_count; j++) {
-				drm_pci_free(dev, dma->bufs[i].seglist[j]);
+				if (dma->bufs[i].seglist[j]) {
+					drm_pci_free(dev, dma->bufs[i].seglist[j]);
+				}
 			}
 			free(dma->bufs[i].seglist, DRM_MEM_SEGS);
 		}
-
-	   	if (dma->bufs[i].buf_count) {
-		   	for (j = 0; j < dma->bufs[i].buf_count; j++) {
+		if (dma->bufs[i].buf_count) {
+			for (j = 0; j < dma->bufs[i].buf_count; j++) {
 				free(dma->bufs[i].buflist[j].dev_private,
 				    DRM_MEM_BUFS);
 			}
-		   	free(dma->bufs[i].buflist, DRM_MEM_BUFS);
+			free(dma->bufs[i].buflist, DRM_MEM_BUFS);
 		}
 	}
 
@@ -87,28 +105,42 @@
 	free(dma->pagelist, DRM_MEM_PAGES);
 	free(dev->dma, DRM_MEM_DRIVER);
 	dev->dma = NULL;
-	DRM_SPINUNINIT(&dev->dma_lock);
 }
 
-
-void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
 {
 	if (!buf)
 		return;
 
-	buf->pending  = 0;
-	buf->file_priv= NULL;
-	buf->used     = 0;
+	buf->waiting = 0;
+	buf->pending = 0;
+	buf->file_priv = NULL;
+	buf->used = 0;
 }
 
-void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
+/**
+ * Reclaim the buffers.
+ *
+ * \param dev DRM device.
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+			      struct drm_file *file_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
-	int		 i;
+	struct drm_device_dma *dma = dev->dma;
+	int i;
 
 	if (!dma)
 		return;
-
 	for (i = 0; i < dma->buf_count; i++) {
 		if (dma->buflist[i]->file_priv == file_priv) {
 			switch (dma->buflist[i]->list) {
@@ -126,15 +158,4 @@
 	}
 }
 
-/* Call into the driver-specific DMA handler */
-int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
-	if (dev->driver->dma_ioctl) {
-		/* shared code returns -errno */
-		return -dev->driver->dma_ioctl(dev, data, file_priv);
-	} else {
-		DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
-		return EINVAL;
-	}
-}
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
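
Usage note: the rename to drm_core_reclaim_buffers() follows Linux, where
legacy DMA drivers call it when a file handle goes away.  A hedged sketch of
such a call site; the hook name is illustrative:

static void
example_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	/* Return any DMA buffers this file still owns to the free pool. */
	drm_core_reclaim_buffers(dev, file_priv);
}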

Added: trunk/sys/dev/drm2/drm_dp_helper.c
===================================================================
--- trunk/sys/dev/drm2/drm_dp_helper.c	                        (rev 0)
+++ trunk/sys/dev/drm2/drm_dp_helper.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,157 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_dp_helper.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_dp_helper.h>
+
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(100);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(400);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+	switch (link_rate) {
+	case 162000:
+	default:
+		return DP_LINK_BW_1_62;
+	case 270000:
+		return DP_LINK_BW_2_7;
+	case 540000:
+		return DP_LINK_BW_5_4;
+	}
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+	switch (link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		return 162000;
+	case DP_LINK_BW_2_7:
+		return 270000;
+	case DP_LINK_BW_5_4:
+		return 540000;
+	}
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);


Property changes on: trunk/sys/dev/drm2/drm_dp_helper.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
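
Usage note: these helpers encode the status checks of the two DP link-training
phases (clock recovery, then channel equalization).  A sketch of how a
training loop might consume them, assuming link_status[] was just read from
DPCD register DP_LANE0_1_STATUS onward over the AUX channel:

static bool
example_link_trained(u8 link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
	/* Clock recovery must hold before equalization is meaningful. */
	if (!drm_dp_clock_recovery_ok(link_status, lane_count))
		return (false);	/* fall back to training pattern 1 */
	return (drm_dp_channel_eq_ok(link_status, lane_count));
}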
Modified: trunk/sys/dev/drm2/drm_dp_helper.h
===================================================================
--- trunk/sys/dev/drm2/drm_dp_helper.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_dp_helper.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -20,13 +20,25 @@
  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
  * OF THIS SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_dp_helper.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_dp_helper.h 254817 2013-08-24 23:38:57Z dumbbell $
  */
 
 #ifndef _DRM_DP_HELPER_H_
 #define _DRM_DP_HELPER_H_
 
-/* From the VESA DisplayPort spec */
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
+ * DP and DPCD versions are independent.  Differences from 1.0 are not noted,
+ * since 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
 
 #define AUX_NATIVE_WRITE	0x8
 #define AUX_NATIVE_READ		0x9
@@ -53,7 +65,7 @@
 
 #define DP_MAX_LANE_COUNT                   0x002
 # define DP_MAX_LANE_COUNT_MASK		    0x1f
-# define DP_TPS3_SUPPORTED		    (1 << 6)
+# define DP_TPS3_SUPPORTED		    (1 << 6) /* 1.2 */
 # define DP_ENHANCED_FRAME_CAP		    (1 << 7)
 
 #define DP_MAX_DOWNSPREAD                   0x003
@@ -69,14 +81,33 @@
 /* 10b = TMDS or HDMI */
 /* 11b = Other */
 # define DP_FORMAT_CONVERSION               (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE	    (1 << 4) /* DPI */
 
 #define DP_MAIN_LINK_CHANNEL_CODING         0x006
 
-#define DP_TRAINING_AUX_RD_INTERVAL         0x00e
+#define DP_DOWN_STREAM_PORT_COUNT	    0x007
+# define DP_PORT_COUNT_MASK		    0x0f
+# define DP_MSA_TIMING_PAR_IGNORED	    (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT			    (1 << 7)
 
-#define DP_PSR_SUPPORT                      0x070
+#define DP_I2C_SPEED_CAP		    0x00c    /* DPI */
+# define DP_I2C_SPEED_1K		    0x01
+# define DP_I2C_SPEED_5K		    0x02
+# define DP_I2C_SPEED_10K		    0x04
+# define DP_I2C_SPEED_100K		    0x08
+# define DP_I2C_SPEED_400K		    0x10
+# define DP_I2C_SPEED_1M		    0x20
+
+#define DP_EDP_CONFIGURATION_CAP            0x00d   /* XXX 1.2? */
+#define DP_TRAINING_AUX_RD_INTERVAL         0x00e   /* XXX 1.2? */
+
+/* Multiple stream transport */
+#define DP_MSTM_CAP			    0x021   /* 1.2 */
+# define DP_MST_CAP			    (1 << 0)
+
+#define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
 # define DP_PSR_IS_SUPPORTED                1
-#define DP_PSR_CAPS                         0x071
+#define DP_PSR_CAPS                         0x071   /* XXX 1.2? */
 # define DP_PSR_NO_TRAIN_ON_EXIT            1
 # define DP_PSR_SETUP_TIME_330              (0 << 1)
 # define DP_PSR_SETUP_TIME_275              (1 << 1)
@@ -88,11 +119,36 @@
 # define DP_PSR_SETUP_TIME_MASK             (7 << 1)
 # define DP_PSR_SETUP_TIME_SHIFT            1
 
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set.  If it was not,
+ * each port's descriptor is one byte wide.  If it was set, each port's is
+ * four bytes wide, starting with the one byte from the base info.  As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0		    0x80
+# define DP_DS_PORT_TYPE_MASK		    (7 << 0)
+# define DP_DS_PORT_TYPE_DP		    0
+# define DP_DS_PORT_TYPE_VGA		    1
+# define DP_DS_PORT_TYPE_DVI		    2
+# define DP_DS_PORT_TYPE_HDMI		    3
+# define DP_DS_PORT_TYPE_NON_EDID	    4
+# define DP_DS_PORT_HPD			    (1 << 3)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 2 */
+# define DP_DS_VGA_MAX_BPC_MASK		    (3 << 0)
+# define DP_DS_VGA_8BPC			    0
+# define DP_DS_VGA_10BPC		    1
+# define DP_DS_VGA_12BPC		    2
+# define DP_DS_VGA_16BPC		    3
+
 /* link configuration */
 #define	DP_LINK_BW_SET		            0x100
 # define DP_LINK_BW_1_62		    0x06
 # define DP_LINK_BW_2_7			    0x0a
-# define DP_LINK_BW_5_4			    0x14
+# define DP_LINK_BW_5_4			    0x14    /* 1.2 */
 
 #define DP_LANE_COUNT_SET	            0x101
 # define DP_LANE_COUNT_MASK		    0x0f
@@ -102,7 +158,7 @@
 # define DP_TRAINING_PATTERN_DISABLE	    0
 # define DP_TRAINING_PATTERN_1		    1
 # define DP_TRAINING_PATTERN_2		    2
-# define DP_TRAINING_PATTERN_3		    3
+# define DP_TRAINING_PATTERN_3		    3	    /* 1.2 */
 # define DP_TRAINING_PATTERN_MASK	    0x3
 
 # define DP_LINK_QUAL_PATTERN_DISABLE	    (0 << 2)
@@ -143,16 +199,32 @@
 
 #define DP_DOWNSPREAD_CTRL		    0x107
 # define DP_SPREAD_AMP_0_5		    (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN	    (1 << 7) /* eDP */
 
 #define DP_MAIN_LINK_CHANNEL_CODING_SET	    0x108
 # define DP_SET_ANSI_8B10B		    (1 << 0)
 
-#define DP_PSR_EN_CFG			    0x170
+#define DP_I2C_SPEED_CONTROL_STATUS	    0x109   /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET            0x10a   /* XXX 1.2? */
+
+#define DP_MSTM_CTRL			    0x111   /* 1.2 */
+# define DP_MST_EN			    (1 << 0)
+# define DP_UP_REQ_EN			    (1 << 1)
+# define DP_UPSTREAM_IS_SRC		    (1 << 2)
+
+#define DP_PSR_EN_CFG			    0x170   /* XXX 1.2? */
 # define DP_PSR_ENABLE			    (1 << 0)
 # define DP_PSR_MAIN_LINK_ACTIVE	    (1 << 1)
 # define DP_PSR_CRC_VERIFICATION	    (1 << 2)
 # define DP_PSR_FRAME_CAPTURE		    (1 << 3)
 
+#define DP_SINK_COUNT			    0x200
+/* prior to 1.2 bit 7 was reserved mbz */
+# define DP_GET_SINK_COUNT(x)		    ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY		    (1 << 6)
+
 #define DP_DEVICE_SERVICE_IRQ_VECTOR	    0x201
 # define DP_REMOTE_CONTROL_COMMAND_PENDING  (1 << 0)
 # define DP_AUTOMATED_TEST_REQUEST	    (1 << 1)
@@ -210,18 +282,22 @@
 # define DP_TEST_NAK			    (1 << 1)
 # define DP_TEST_EDID_CHECKSUM_WRITE	    (1 << 2)
 
+#define DP_SOURCE_OUI			    0x300
+#define DP_SINK_OUI			    0x400
+#define DP_BRANCH_OUI			    0x500
+
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
 # define DP_SET_POWER_D3                    0x2
 
-#define DP_PSR_ERROR_STATUS                 0x2006
+#define DP_PSR_ERROR_STATUS                 0x2006  /* XXX 1.2? */
 # define DP_PSR_LINK_CRC_ERROR              (1 << 0)
 # define DP_PSR_RFB_STORAGE_ERROR           (1 << 1)
 
-#define DP_PSR_ESI                          0x2007
+#define DP_PSR_ESI                          0x2007  /* XXX 1.2? */
 # define DP_PSR_CAPS_CHANGE                 (1 << 0)
 
-#define DP_PSR_STATUS                       0x2008
+#define DP_PSR_STATUS                       0x2008  /* XXX 1.2? */
 # define DP_PSR_SINK_INACTIVE               0
 # define DP_PSR_SINK_ACTIVE_SRC_SYNCED      1
 # define DP_PSR_SINK_ACTIVE_RFB             2
@@ -248,4 +324,34 @@
     int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
     void *priv, device_t *bus, device_t *adapter);
 
+
+#define DP_LINK_STATUS_SIZE	   6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane);
+
+#define DP_RECEIVER_CAP_SIZE	0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
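
The two inlines at the end read straight out of a cached receiver-capability
block.  A small sketch, assuming dpcd[] holds the first DP_RECEIVER_CAP_SIZE
bytes of the sink's DPCD fetched earlier over AUX:

static void
example_dump_caps(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int max_rate;
	u8 max_lanes;

	max_rate = drm_dp_max_link_rate(dpcd);	 /* 162000, 270000 or 540000 */
	max_lanes = drm_dp_max_lane_count(dpcd); /* 1, 2 or 4 */
	DRM_DEBUG_KMS("sink: link rate %d, %d lanes\n", max_rate, max_lanes);
}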

Modified: trunk/sys/dev/drm2/drm_dp_iic_helper.c
===================================================================
--- trunk/sys/dev/drm2/drm_dp_iic_helper.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_dp_iic_helper.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -22,7 +22,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_dp_iic_helper.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_dp_iic_helper.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <sys/types.h>
 #include <sys/kobj.h>
@@ -42,7 +42,9 @@
 
 	aux_data = device_get_softc(idev);
 	ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte);
-	return (ret);
+	if (ret < 0)
+		return (ret);
+	return (0);
 }
 
 /*
@@ -107,7 +109,7 @@
 	aux_data = device_get_softc(idev);
 
 	if (!aux_data->running)
-		return (EIO);
+		return (-EIO);
 
 	ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL);
 	return (ret);
@@ -126,7 +128,7 @@
 	aux_data = device_get_softc(idev);
 
 	if (!aux_data->running)
-		return (EIO);
+		return (-EIO);
 
 	ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret);
 	return (ret);
@@ -147,7 +149,7 @@
 		len = msgs[m].len;
 		buf = msgs[m].buf;
 		reading = (msgs[m].flags & IIC_M_RD) != 0;
-		ret = iic_dp_aux_address(idev, msgs[m].slave, reading);
+		ret = iic_dp_aux_address(idev, msgs[m].slave >> 1, reading);
 		if (ret != 0)
 			break;
 		if (reading) {
@@ -168,7 +170,7 @@
 	}
 	iic_dp_aux_stop(idev, reading);
 	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
-	return (ret);
+	return (-ret);
 }
 
 static void
@@ -184,7 +186,7 @@
 {
 
 	iic_dp_aux_reset_bus(idev);
-	return (0);				   
+	return (0);
 }
 
 static int
@@ -217,22 +219,6 @@
 	return (0);
 }
 
-static int
-iic_dp_aux_detach(device_t idev)
-{
-	struct iic_dp_aux_data *aux_data;
-	device_t port;
-
-	aux_data = device_get_softc(idev);
-
-	port = aux_data->port;
-	bus_generic_detach(idev);
-	if (port != NULL)
-		device_delete_child(idev, port);
-
-	return (0);
-}
-
 int
 iic_dp_aux_add_bus(device_t dev, const char *name,
     int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
@@ -272,13 +258,13 @@
 		*adapter = data->port;
 	}
 	mtx_unlock(&Giant);
-	return (error);
+	return (-error);
 }
 
 static device_method_t drm_iic_dp_aux_methods[] = {
 	DEVMETHOD(device_probe,		iic_dp_aux_probe),
 	DEVMETHOD(device_attach,	iic_dp_aux_attach),
-	DEVMETHOD(device_detach,	iic_dp_aux_detach),
+	DEVMETHOD(device_detach,	bus_generic_detach),
 	DEVMETHOD(iicbus_reset,		iic_dp_aux_reset),
 	DEVMETHOD(iicbus_transfer,	iic_dp_aux_xfer),
 	DEVMETHOD_END
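
A few things are worth noting in the drm_dp_iic_helper.c hunks above. First,
the custom detach routine is dropped in favor of plain bus_generic_detach().
Second, iic_dp_aux_xfer() now shifts the slave address right by one bit,
matching the convention (also visible in the drm_edid.c hunk below, where
DDC_ADDR is passed as DDC_ADDR << 1) that FreeBSD iic messages carry the
read/write bit in the low bit of the address. Third, the return values flip
to the Linux convention: FreeBSD bus and iicbus code reports positive errno
values, while the shared Linux-derived DRM code expects 0 or -errno, so
values are negated exactly once at the boundary (the new return (-EIO),
return (-ret), and return (-error) sites). A minimal sketch of that
convention, with freebsd_op() as a hypothetical positive-errno callee:

	static int
	drm_style_wrapper(device_t idev)
	{
		int error;

		/* FreeBSD convention: 0 on success, positive errno on failure. */
		error = freebsd_op(idev);

		/* Linux/DRM convention: 0 on success, -errno on failure. */
		return (-error);
	}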

Modified: trunk/sys/dev/drm2/drm_drv.c
===================================================================
--- trunk/sys/dev/drm2/drm_drv.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_drv.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,28 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME		"mga"
+ * #define DRIVER_DESC		"Matrox G200/G400"
+ * #define DRIVER_DATE		"20001127"
+ *
+ * #define drm_x		mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth at valinux.com
+ *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,84 +45,35 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_drv.c 243442 2012-11-23 11:23:15Z glebius $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_drv.c 297046 2016-03-18 22:52:11Z dumbbell $");
 
-/** @file drm_drv.c
- * The catch-all file for DRM device support, including module setup/teardown,
- * open/close, and ioctl dispatch.
- */
+#include <sys/sysent.h>
 
-#include <sys/limits.h>
-#include <sys/sysent.h>
 #include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/drm_sarea.h>
-#include <dev/drm2/drm_mode.h>
+#include <dev/drm2/drm_core.h>
+#include <dev/drm2/drm_global.h>
 
-#ifdef DRM_DEBUG_DEFAULT_ON
-int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
-    DRM_DEBUGBITS_FAILED_IOCTL);
-#else
-int drm_debug_flag = 0;
-#endif
-int drm_notyet_flag = 0;
+struct sx drm_global_mutex;
 
-unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
-unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
-
-static int drm_load(struct drm_device *dev);
-static void drm_unload(struct drm_device *dev);
-static drm_pci_id_list_t *drm_find_description(int vendor, int device,
-    drm_pci_id_list_t *idlist);
-
-static int
-drm_modevent(module_t mod, int type, void *data)
-{
-
-	switch (type) {
-	case MOD_LOAD:
-		TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
-		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
-		break;
-	}
-	return (0);
-}
-
-static moduledata_t drm_mod = {
-	"drmn",
-	drm_modevent,
-	0
-}; 
-DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
-MODULE_VERSION(drmn, 1);
-MODULE_DEPEND(drmn, agp, 1, 1, 1);
-MODULE_DEPEND(drmn, pci, 1, 1, 1);
-MODULE_DEPEND(drmn, mem, 1, 1, 1);
-MODULE_DEPEND(drmn, iicbus, 1, 1, 1);
-
-static drm_ioctl_desc_t		  drm_ioctls[256] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
@@ -118,8 +92,8 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -131,10 +105,12 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
+	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
+#if __OS_HAS_AGP
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -143,682 +119,254 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+#ifdef FREEBSD_NOTYET
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+#endif /* FREEBSD_NOTYET */
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
-static struct cdevsw drm_cdevsw = {
-	.d_version =	D_VERSION,
-	.d_open =	drm_open,
-	.d_read =	drm_read,
-	.d_ioctl =	drm_ioctl,
-	.d_poll =	drm_poll,
-	.d_mmap =	drm_mmap,
-	.d_mmap_single = drm_gem_mmap_single,
-	.d_name =	"drm",
-	.d_flags =	D_TRACKCLOSE
-};
+#ifdef COMPAT_FREEBSD32
+extern struct drm_ioctl_desc drm_compat_ioctls[];
+#endif
 
-static int drm_msi = 1;	/* Enable by default. */
-TUNABLE_INT("hw.drm.msi", &drm_msi);
-SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
-SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
-    "Enable MSI interrupts for drm devices");
+#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
-	{0x8086, 0x2772}, /* Intel i945G	*/ \
-	{0x8086, 0x27A2}, /* Intel i945GM	*/ \
-	{0x8086, 0x27AE}, /* Intel i945GME	*/ \
-	{0, 0}
-};
-
-static int drm_msi_is_blacklisted(int vendor, int device)
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
 {
-	int i = 0;
-	
-	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
-		if ((drm_msi_blacklist[i].vendor == vendor) &&
-		    (drm_msi_blacklist[i].device == device)) {
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
-{
-	drm_pci_id_list_t *id_entry;
-	int vendor, device;
-
-	vendor = pci_get_vendor(kdev);
-	device = pci_get_device(kdev);
-
-	if (pci_get_class(kdev) != PCIC_DISPLAY
-	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
-		return ENXIO;
-
-	id_entry = drm_find_description(vendor, device, idlist);
-	if (id_entry != NULL) {
-		if (!device_get_desc(kdev)) {
-			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
-			device_set_desc(kdev, id_entry->name);
-		}
-		return 0;
-	}
-
-	return ENXIO;
-}
-
-int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
-{
-	struct drm_device *dev;
-	drm_pci_id_list_t *id_entry;
-	int error, msicount;
-
-	dev = device_get_softc(kdev);
-
-	dev->device = kdev;
-
-	dev->pci_domain = pci_get_domain(dev->device);
-	dev->pci_bus = pci_get_bus(dev->device);
-	dev->pci_slot = pci_get_slot(dev->device);
-	dev->pci_func = pci_get_function(dev->device);
-
-	dev->pci_vendor = pci_get_vendor(dev->device);
-	dev->pci_device = pci_get_device(dev->device);
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
-		if (drm_msi &&
-		    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
-			msicount = pci_msi_count(dev->device);
-			DRM_DEBUG("MSI count = %d\n", msicount);
-			if (msicount > 1)
-				msicount = 1;
-
-			if (pci_alloc_msi(dev->device, &msicount) == 0) {
-				DRM_INFO("MSI enabled %d message(s)\n",
-				    msicount);
-				dev->msi_enabled = 1;
-				dev->irqrid = 1;
-			}
-		}
-
-		dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
-		    &dev->irqrid, RF_SHAREABLE);
-		if (!dev->irqr) {
-			return (ENOENT);
-		}
-
-		dev->irq = (int) rman_get_start(dev->irqr);
-	}
-
-	mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
-	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
-	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
-	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
-	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
-	sx_init(&dev->dev_struct_lock, "drmslk");
-
-	id_entry = drm_find_description(dev->pci_vendor,
-	    dev->pci_device, idlist);
-	dev->id_entry = id_entry;
-
-	error = drm_load(dev);
-	if (error == 0)
-		error = drm_create_cdevs(kdev);
-	return (error);
-}
-
-int
-drm_create_cdevs(device_t kdev)
-{
-	struct drm_device *dev;
-	int error, unit;
-
-	unit = device_get_unit(kdev);
-	dev = device_get_softc(kdev);
-
-	error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
-	    &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
-	    DRM_DEV_MODE, "dri/card%d", unit);
-	if (error == 0)
-		dev->devnode->si_drv1 = dev;
-	return (error);
-}
-
-int drm_detach(device_t kdev)
-{
-	struct drm_device *dev;
-
-	dev = device_get_softc(kdev);
-	drm_unload(dev);
-	if (dev->irqr) {
-		bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
-		    dev->irqr);
-		if (dev->msi_enabled) {
-			pci_release_msi(dev->device);
-			DRM_INFO("MSI released\n");
-		}
-	}
-	return (0);
-}
-
-#ifndef DRM_DEV_NAME
-#define DRM_DEV_NAME "drm"
+#ifdef __linux__
+	struct drm_vma_entry *vma, *vma_temp;
 #endif
 
-devclass_t drm_devclass;
-
-drm_pci_id_list_t *drm_find_description(int vendor, int device,
-    drm_pci_id_list_t *idlist)
-{
-	int i = 0;
-	
-	for (i = 0; idlist[i].vendor != 0; i++) {
-		if ((idlist[i].vendor == vendor) &&
-		    ((idlist[i].device == device) ||
-		    (idlist[i].device == 0))) {
-			return &idlist[i];
-		}
-	}
-	return NULL;
-}
-
-static int drm_firstopen(struct drm_device *dev)
-{
-	drm_local_map_t *map;
-	int i;
-
-	DRM_LOCK_ASSERT(dev);
-
-	/* prebuild the SAREA */
-	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
-	    _DRM_CONTAINS_LOCK, &map);
-	if (i != 0)
-		return i;
-
-	if (dev->driver->firstopen)
-		dev->driver->firstopen(dev);
-
-	dev->buf_use = 0;
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
-		i = drm_dma_setup(dev);
-		if (i != 0)
-			return i;
-	}
-
-	for (i = 0; i < DRM_HASH_SIZE; i++) {
-		dev->magiclist[i].head = NULL;
-		dev->magiclist[i].tail = NULL;
-	}
-
-	dev->lock.lock_queue = 0;
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		dev->irq_enabled = 0;
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-
-	dev->buf_sigio = NULL;
-
 	DRM_DEBUG("\n");
 
-	return 0;
-}
-
-static int drm_lastclose(struct drm_device *dev)
-{
-	drm_magic_entry_t *pt, *next;
-	drm_local_map_t *map, *mapsave;
-	int i;
-
-	DRM_LOCK_ASSERT(dev);
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose != NULL)
+	if (dev->driver->lastclose)
 		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_irq_uninstall(dev);
 
-	if (dev->unique) {
-		free(dev->unique, DRM_MEM_DRIVER);
-		dev->unique = NULL;
-		dev->unique_len = 0;
-	}
-	/* Clear pid list */
-	for (i = 0; i < DRM_HASH_SIZE; i++) {
-		for (pt = dev->magiclist[i].head; pt; pt = next) {
-			next = pt->next;
-			free(pt, DRM_MEM_MAGIC);
-		}
-		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
-	}
-
-	DRM_UNLOCK(dev);
-	drm_drawable_free_all(dev);
 	DRM_LOCK(dev);
 
 	/* Clear AGP information */
-	if (dev->agp) {
-		drm_agp_mem_t *entry;
-		drm_agp_mem_t *nexte;
+	if (drm_core_has_AGP(dev) && dev->agp &&
+			!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct drm_agp_mem *entry, *tempe;
 
-		/* Remove AGP resources, but leave dev->agp intact until
-		 * drm_unload is called.
-		 */
-		for (entry = dev->agp->memory; entry; entry = nexte) {
-			nexte = entry->next;
+		/* Remove AGP resources, but leave dev->agp
+		   intact until drv_cleanup is called. */
+		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
 			if (entry->bound)
-				drm_agp_unbind_memory(entry->handle);
-			drm_agp_free_memory(entry->handle);
+				drm_unbind_agp(entry->memory);
+			drm_free_agp(entry->memory, entry->pages);
 			free(entry, DRM_MEM_AGPLISTS);
 		}
-		dev->agp->memory = NULL;
+		INIT_LIST_HEAD(&dev->agp->memory);
 
 		if (dev->agp->acquired)
 			drm_agp_release(dev);
 
 		dev->agp->acquired = 0;
-		dev->agp->enabled  = 0;
+		dev->agp->enabled = 0;
 	}
-	if (dev->sg != NULL) {
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		drm_sg_cleanup(dev->sg);
 		dev->sg = NULL;
 	}
 
-	TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
-		if (!(map->flags & _DRM_DRIVER))
-			drm_rmmap(dev, map);
+#ifdef __linux__
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
 	}
+#endif
 
-	drm_dma_takedown(dev);
-	if (dev->lock.hw_lock) {
-		dev->lock.hw_lock = NULL; /* SHM removed */
-		dev->lock.file_priv = NULL;
-		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
-	}
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_dma_takedown(dev);
 
+	DRM_UNLOCK(dev);
+
+	DRM_DEBUG("lastclose completed\n");
 	return 0;
 }
 
-static int drm_load(struct drm_device *dev)
+#ifdef __linux__
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_stub_open,
+	.llseek = noop_llseek,
+};
+#endif
+
+static int __init drm_core_init(void)
 {
-	int i, retcode;
 
-	DRM_DEBUG("\n");
+	sx_init(&drm_global_mutex, "drm_global_mutex");
 
-	TAILQ_INIT(&dev->maplist);
-	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
-	if (dev->map_unrhdr == NULL) {
-		DRM_ERROR("Couldn't allocate map number allocator\n");
-		return EINVAL;
-	}
+	drm_global_init();
 
+#if DRM_LINUX
+	linux_ioctl_register_handler(&drm_handler);
+#endif /* DRM_LINUX */
 
-	drm_mem_init();
-	drm_sysctl_init(dev);
-	TAILQ_INIT(&dev->files);
-
-	dev->counters  = 6;
-	dev->types[0]  = _DRM_STAT_LOCK;
-	dev->types[1]  = _DRM_STAT_OPENS;
-	dev->types[2]  = _DRM_STAT_CLOSES;
-	dev->types[3]  = _DRM_STAT_IOCTLS;
-	dev->types[4]  = _DRM_STAT_LOCKS;
-	dev->types[5]  = _DRM_STAT_UNLOCKS;
-
-	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	INIT_LIST_HEAD(&dev->vblank_event_list);
-
-	if (drm_core_has_AGP(dev)) {
-		if (drm_device_is_agp(dev))
-			dev->agp = drm_agp_init();
-		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
-		    dev->agp == NULL) {
-			DRM_ERROR("Card isn't AGP, or couldn't initialize "
-			    "AGP.\n");
-			retcode = ENOMEM;
-			goto error;
-		}
-		if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
-			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
-			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
-				dev->agp->mtrr = 1;
-		}
-	}
-
-	retcode = drm_ctxbitmap_init(dev);
-	if (retcode != 0) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto error;
-	}
-
-	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
-	if (dev->drw_unrhdr == NULL) {
-		DRM_ERROR("Couldn't allocate drawable number allocator\n");
-		retcode = ENOMEM;
-		goto error;
-	}
-
-	if (dev->driver->driver_features & DRIVER_GEM) {
-		retcode = drm_gem_init(dev);
-		if (retcode != 0) {
-			DRM_ERROR("Cannot initialize graphics execution "
-				  "manager (GEM)\n");
-			goto error1;
-		}
-	}
-
-	if (dev->driver->load != NULL) {
-		DRM_LOCK(dev);
-		/* Shared code returns -errno. */
-		retcode = -dev->driver->load(dev,
-		    dev->id_entry->driver_private);
-		if (pci_enable_busmaster(dev->device))
-			DRM_ERROR("Request to enable bus-master failed.\n");
-		DRM_UNLOCK(dev);
-		if (retcode != 0)
-			goto error;
-	}
-
 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
-	    dev->driver->name,
-	    dev->driver->major,
-	    dev->driver->minor,
-	    dev->driver->patchlevel,
-	    dev->driver->date);
-
+		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 	return 0;
-
-error1:
-	delete_unrhdr(dev->drw_unrhdr);
-error:
-	drm_sysctl_cleanup(dev);
-	DRM_LOCK(dev);
-	drm_lastclose(dev);
-	DRM_UNLOCK(dev);
-	if (dev->devnode != NULL)
-		destroy_dev(dev->devnode);
-
-	mtx_destroy(&dev->drw_lock);
-	mtx_destroy(&dev->vbl_lock);
-	mtx_destroy(&dev->irq_lock);
-	mtx_destroy(&dev->dev_lock);
-	mtx_destroy(&dev->event_lock);
-	sx_destroy(&dev->dev_struct_lock);
-
-	return retcode;
 }
 
-static void drm_unload(struct drm_device *dev)
+static void __exit drm_core_exit(void)
 {
-	int i;
 
-	DRM_DEBUG("\n");
+#if DRM_LINUX
+	linux_ioctl_unregister_handler(&drm_handler);
+#endif /* DRM_LINUX */
 
-	drm_sysctl_cleanup(dev);
-	if (dev->devnode != NULL)
-		destroy_dev(dev->devnode);
+	drm_global_release();
 
-	drm_ctxbitmap_cleanup(dev);
+	sx_destroy(&drm_global_mutex);
+}
 
-	if (dev->driver->driver_features & DRIVER_GEM)
-		drm_gem_destroy(dev);
+SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
+    drm_core_init, NULL);
+SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
+    drm_core_exit, NULL);
 
-	if (dev->agp && dev->agp->mtrr) {
-		int __unused retcode;
-
-		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
-		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
-		DRM_DEBUG("mtrr_del = %d", retcode);
-	}
-
-	drm_vblank_cleanup(dev);
-
-	DRM_LOCK(dev);
-	drm_lastclose(dev);
-	DRM_UNLOCK(dev);
-
-	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
-	 * worried about resource consumption while the DRM is inactive (between
-	 * lastclose and firstopen or unload) because these aren't actually
-	 * taking up KVA, just keeping the PCI resource allocated.
-	 */
-	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
-		if (dev->pcir[i] == NULL)
-			continue;
-		bus_release_resource(dev->device, SYS_RES_MEMORY,
-		    dev->pcirid[i], dev->pcir[i]);
-		dev->pcir[i] = NULL;
-	}
-
-	if (dev->agp) {
-		free(dev->agp, DRM_MEM_AGPLISTS);
-		dev->agp = NULL;
-	}
-
-	if (dev->driver->unload != NULL) {
-		DRM_LOCK(dev);
-		dev->driver->unload(dev);
-		DRM_UNLOCK(dev);
-	}
-
-	delete_unrhdr(dev->drw_unrhdr);
-	delete_unrhdr(dev->map_unrhdr);
-
-	drm_mem_uninit();
-
-	if (pci_disable_busmaster(dev->device))
-		DRM_ERROR("Request to disable bus-master failed.\n");
-
-	mtx_destroy(&dev->drw_lock);
-	mtx_destroy(&dev->vbl_lock);
-	mtx_destroy(&dev->irq_lock);
-	mtx_destroy(&dev->dev_lock);
-	mtx_destroy(&dev->event_lock);
-	sx_destroy(&dev->dev_struct_lock);
-}
-
-int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
 {
-	struct drm_version *version = data;
 	int len;
 
-#define DRM_COPY( name, value )						\
-	len = strlen( value );						\
-	if ( len > name##_len ) len = name##_len;			\
-	name##_len = strlen( value );					\
-	if ( len && name ) {						\
-		if ( DRM_COPY_TO_USER( name, value, len ) )		\
-			return EFAULT;				\
-	}
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
 
-	version->version_major		= dev->driver->major;
-	version->version_minor		= dev->driver->minor;
-	version->version_patchlevel	= dev->driver->patchlevel;
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
 
-	DRM_COPY(version->name, dev->driver->name);
-	DRM_COPY(version->date, dev->driver->date);
-	DRM_COPY(version->desc, dev->driver->desc);
-
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
 	return 0;
 }
 
-int
-drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
-	struct drm_device *dev;
-	int retcode;
+	struct drm_version *version = data;
+	int err;
 
-	dev = kdev->si_drv1;
-	if (dev == NULL)
-		return (ENXIO);
+	version->version_major = dev->driver->major;
+	version->version_minor = dev->driver->minor;
+	version->version_patchlevel = dev->driver->patchlevel;
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);
 
-	DRM_DEBUG("open_count = %d\n", dev->open_count);
-
-	retcode = drm_open_helper(kdev, flags, fmt, p, dev);
-
-	if (retcode == 0) {
-		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
-		DRM_LOCK(dev);
-		mtx_lock(&Giant);
-		device_busy(dev->device);
-		mtx_unlock(&Giant);
-		if (!dev->open_count++)
-			retcode = drm_firstopen(dev);
-		DRM_UNLOCK(dev);
-	}
-
-	return (retcode);
+	return err;
 }
 
-void drm_close(void *data)
-{
-	struct drm_file *file_priv = data;
-	struct drm_device *dev = file_priv->dev;
-	int retcode = 0;
-
-	DRM_DEBUG("open_count = %d\n", dev->open_count);
-
-	DRM_LOCK(dev);
-
-	if (dev->driver->preclose != NULL)
-		dev->driver->preclose(dev, file_priv);
-
-	/* ========================================================
-	 * Begin inline drm_release
-	 */
-
-	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
-	    DRM_CURRENTPID, (long)dev->device, dev->open_count);
-
-	if (dev->driver->driver_features & DRIVER_GEM)
-		drm_gem_release(dev, file_priv);
-
-	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
-	    && dev->lock.file_priv == file_priv) {
-		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
-			  DRM_CURRENTPID,
-			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-		if (dev->driver->reclaim_buffers_locked != NULL)
-			dev->driver->reclaim_buffers_locked(dev, file_priv);
-
-		drm_lock_free(&dev->lock,
-		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-		
-				/* FIXME: may require heavy-handed reset of
-                                   hardware at this point, possibly
-                                   processed via a callback to the X
-                                   server. */
-	} else if (dev->driver->reclaim_buffers_locked != NULL &&
-	    dev->lock.hw_lock != NULL) {
-		/* The lock is required to reclaim buffers */
-		for (;;) {
-			if (!dev->lock.hw_lock) {
-				/* Device has been unregistered */
-				retcode = EINTR;
-				break;
-			}
-			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
-				dev->lock.file_priv = file_priv;
-				dev->lock.lock_time = jiffies;
-				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-				break;	/* Got lock */
-			}
-			/* Contention */
-			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
-			    PCATCH, "drmlk2", 0);
-			if (retcode)
-				break;
-		}
-		if (retcode == 0) {
-			dev->driver->reclaim_buffers_locked(dev, file_priv);
-			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
-		}
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-	    !dev->driver->reclaim_buffers_locked)
-		drm_reclaim_buffers(dev, file_priv);
-
-	funsetown(&dev->buf_sigio);
-	seldrain(&file_priv->event_poll);
-
-	if (dev->driver->postclose != NULL)
-		dev->driver->postclose(dev, file_priv);
-	TAILQ_REMOVE(&dev->files, file_priv, link);
-	free(file_priv, DRM_MEM_FILES);
-
-	/* ========================================================
-	 * End inline drm_release
-	 */
-
-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
-	mtx_lock(&Giant);
-	device_unbusy(dev->device);
-	mtx_unlock(&Giant);
-	if (--dev->open_count == 0) {
-		retcode = drm_lastclose(dev);
-	}
-
-	DRM_UNLOCK(dev);
-}
-
-extern drm_ioctl_desc_t drm_compat_ioctls[];
-
-/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
  */
-int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, 
+int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
     DRM_STRUCTPROC *p)
 {
-	struct drm_device *dev = drm_get_device_from_kdev(kdev);
-	int retcode = 0;
-	drm_ioctl_desc_t *ioctl;
-	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
-	int nr = DRM_IOCTL_NR(cmd);
-	int is_driver_ioctl = 0;
 	struct drm_file *file_priv;
+	struct drm_device *dev;
+	struct drm_ioctl_desc *ioctl;
+	drm_ioctl_t *func;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int retcode;
 
+	dev = drm_get_device_from_kdev(kdev);
+
 	retcode = devfs_get_cdevpriv((void **)&file_priv);
 	if (retcode != 0) {
 		DRM_ERROR("can't find authenticator\n");
@@ -825,182 +373,132 @@
 		return EINVAL;
 	}
 
+	retcode = -EINVAL;
+
+	atomic_inc(&dev->ioctl_count);
 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
-	    DRM_CURRENTPID, cmd, nr, (long)dev->device,
-	    file_priv->authenticated);
+		  DRM_CURRENTPID, cmd, nr,
+		  (long)file_priv->minor->device,
+		  file_priv->authenticated);
 
 	switch (cmd) {
 	case FIONBIO:
 	case FIOASYNC:
+		atomic_dec(&dev->ioctl_count);
 		return 0;
 
 	case FIOSETOWN:
-		return fsetown(*(int *)data, &dev->buf_sigio);
+		atomic_dec(&dev->ioctl_count);
+		return fsetown(*(int *)data, &file_priv->minor->buf_sigio);
 
 	case FIOGETOWN:
-		*(int *) data = fgetown(&dev->buf_sigio);
+		atomic_dec(&dev->ioctl_count);
+		*(int *) data = fgetown(&file_priv->minor->buf_sigio);
 		return 0;
 	}
 
 	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
+		atomic_dec(&dev->ioctl_count);
 		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
 		return EINVAL;
 	}
 
+	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+		goto err_i1;
 #ifdef COMPAT_FREEBSD32
-	/*
-	 * Called whenever a 32-bit process running under a 64-bit
-	 * kernel performs an ioctl on /dev/drm.
-	 */
-	if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
+	if (SV_CURPROC_FLAG(SV_ILP32) &&
+	    (nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+	    (nr < DRM_COMMAND_BASE + *dev->driver->num_compat_ioctls) &&
+	    (dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE].func != NULL)) {
+		ioctl = &dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE];
+	} else
+#endif
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	}
+	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+#ifdef COMPAT_FREEBSD32
 		/*
-		 * Assume that ioctls without an explicit compat
-		 * routine will just work.  This may not always be a
-		 * good assumption, but it's better than always
-		 * failing.
+		 * Called whenever a 32-bit process running under a 64-bit
+		 * kernel performs an ioctl on /dev/drm.
 		 */
-		ioctl = &drm_compat_ioctls[nr];
-	else
+		if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
+			/*
+			 * Assume that ioctls without an explicit compat
+			 * routine will just work.  This may not always be a
+			 * good assumption, but it's better than always
+			 * failing.
+			 */
+			ioctl = &drm_compat_ioctls[nr];
+		else
 #endif
-		ioctl = &drm_ioctls[nr];
-	/* It's not a core DRM ioctl, try driver-specific. */
-	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
-		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
-		nr -= DRM_COMMAND_BASE;
-		if (nr > dev->driver->max_ioctl) {
-			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
-			    nr, dev->driver->max_ioctl);
-			return EINVAL;
-		}
+			ioctl = &drm_ioctls[nr];
+	} else
+		goto err_i1;
+
+	/* Do not trust userspace, use our own definition */
+	func = ioctl->func;
+	/* is there a local override? */
+#ifdef FREEBSD_NOTYET
 #ifdef COMPAT_FREEBSD32
-		if (SV_CURPROC_FLAG(SV_ILP32) &&
-		    nr < *dev->driver->compat_ioctls_nr &&
-		    dev->driver->compat_ioctls[nr].func != NULL)
-			ioctl = &dev->driver->compat_ioctls[nr];
-		else
+	if (SV_CURPROC_FLAG(SV_ILP32) &&
+	    (nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_compat_ioctl)
+		func = dev->driver->dma_compat_ioctl;
+	else
 #endif
-			ioctl = &dev->driver->ioctls[nr];
-		is_driver_ioctl = 1;
-	}
-	func = ioctl->func;
+#endif /* FREEBSD_NOTYET */
+	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+		func = dev->driver->dma_ioctl;
 
-	if (func == NULL) {
+	if (!func) {
 		DRM_DEBUG("no function\n");
-		return EINVAL;
-	}
-
-	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
-	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
-	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
-		return EACCES;
-
-	if (is_driver_ioctl) {
-		if ((ioctl->flags & DRM_UNLOCKED) == 0)
-			DRM_LOCK(dev);
-		/* shared code returns -errno */
-		retcode = -func(dev, data, file_priv);
-		if ((ioctl->flags & DRM_UNLOCKED) == 0)
-			DRM_UNLOCK(dev);
+		retcode = -EINVAL;
+	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
+		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+		   (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+		retcode = -EACCES;
 	} else {
-		retcode = func(dev, data, file_priv);
+		if (ioctl->flags & DRM_UNLOCKED)
+			retcode = func(dev, data, file_priv);
+		else {
+			sx_xlock(&drm_global_mutex);
+			retcode = func(dev, data, file_priv);
+			sx_xunlock(&drm_global_mutex);
+		}
 	}
 
-	if (retcode != 0)
-		DRM_DEBUG("    returning %d\n", retcode);
+      err_i1:
+	atomic_dec(&dev->ioctl_count);
+	if (retcode)
+		DRM_DEBUG("ret = %d\n", retcode);
 	if (retcode != 0 &&
-	    (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
+	    (drm_debug & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
 		printf(
-"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
-		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
-		    file_priv->authenticated, retcode);
+"pid %d, cmd 0x%02lx, nr 0x%02x, dev 0x%lx, auth %d, res %d\n",
+		    DRM_CURRENTPID, cmd, nr, (long)file_priv->minor->device,
+		    file_priv->authenticated, -retcode);
 	}
 
-	return retcode;
+	return -retcode;
 }
+EXPORT_SYMBOL(drm_ioctl);
 
-drm_local_map_t *drm_getsarea(struct drm_device *dev)
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
 {
-	drm_local_map_t *map;
+	struct drm_map_list *entry;
 
-	DRM_LOCK_ASSERT(dev);
-	TAILQ_FOREACH(map, &dev->maplist, link) {
-		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
-			return map;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && entry->map->type == _DRM_SHM &&
+		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+			return entry->map;
+		}
 	}
-
 	return NULL;
 }
-
-int
-drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
-    struct sysctl_oid *top)
-{
-	struct sysctl_oid *oid;
-
-	snprintf(dev->busid_str, sizeof(dev->busid_str),
-	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
-	     dev->pci_slot, dev->pci_func);
-	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
-	    CTLFLAG_RD, dev->busid_str, 0, NULL);
-	if (oid == NULL)
-		return (ENOMEM);
-	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
-	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
-	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
-	if (oid == NULL)
-		return (ENOMEM);
-
-	return (0);
-}
-
-#if DRM_LINUX
-
-#include <sys/sysproto.h>
-
-MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
-
-#define LINUX_IOCTL_DRM_MIN		0x6400
-#define LINUX_IOCTL_DRM_MAX		0x64ff
-
-static linux_ioctl_function_t drm_linux_ioctl;
-static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, 
-    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
-
-SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE, 
-    linux_ioctl_register_handler, &drm_handler);
-SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, 
-    linux_ioctl_unregister_handler, &drm_handler);
-
-/* The bits for in/out are switched on Linux */
-#define LINUX_IOC_IN	IOC_OUT
-#define LINUX_IOC_OUT	IOC_IN
-
-static int
-drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
-{
-	int error;
-	int cmd = args->cmd;
-
-	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
-	if (cmd & LINUX_IOC_IN)
-		args->cmd |= IOC_IN;
-	if (cmd & LINUX_IOC_OUT)
-		args->cmd |= IOC_OUT;
-	
-	error = ioctl(p, (struct ioctl_args *)args);
-
-	return error;
-}
-#endif /* DRM_LINUX */
-
-bool
-dmi_check_system(const struct dmi_system_id *sysid)
-{
-
-	/* XXXKIB */
-	return (false);
-}
-
+EXPORT_SYMBOL(drm_getsarea);
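
The rewritten drm_ioctl() above replaces the old per-device locking with the
Linux model: the handler is resolved from the core table, the driver table,
or a compat table; the permission flags are checked; and everything not
marked DRM_UNLOCKED is serialized under the new global sx lock. Condensed
into a sketch (a paraphrase of the code above, not additional commit
content), the dispatch policy is:

	static int
	dispatch(struct drm_ioctl_desc *ioctl, struct drm_device *dev,
	    void *data, struct drm_file *file_priv, DRM_STRUCTPROC *p)
	{
		int ret;

		if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
		    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
		    ((ioctl->flags & DRM_MASTER) && !file_priv->is_master))
			return (-EACCES);

		if (ioctl->flags & DRM_UNLOCKED)
			return (ioctl->func(dev, data, file_priv));

		sx_xlock(&drm_global_mutex);
		ret = ioctl->func(dev, data, file_priv);
		sx_xunlock(&drm_global_mutex);
		return (ret);
	}

Note also that drm_ioctl() now returns -retcode at the end: internally it
keeps Linux-style negative errors, but the cdevsw entry point must hand a
positive errno back to the FreeBSD kernel.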

Modified: trunk/sys/dev/drm2/drm_edid.c
===================================================================
--- trunk/sys/dev/drm2/drm_edid.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_edid.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -30,11 +30,11 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_edid.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_edid.c 299622 2016-05-13 08:39:33Z ngie $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm_edid.h>
-#include <dev/drm2/drm_edid_modes.h>
+#include "drm_edid_modes.h"
 #include <dev/iicbus/iic.h>
 #include <dev/iicbus/iiconf.h>
 #include "iicbus_if.h"
@@ -70,6 +70,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -85,7 +87,7 @@
 #define LEVEL_CVT	3
 
 static struct edid_quirk {
-	char *vendor;
+	char vendor[4];
 	int product_id;
 	u32 quirks;
 } edid_quirk_list[] = {
@@ -124,6 +126,9 @@
 	/* Samsung SyncMaster 22[5-6]BW */
 	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* ViewSonic VA2026w */
+	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -146,22 +151,30 @@
 
 	return score;
 }
+EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+		 "Minimum number of valid EDID header bytes (0-8, default 6)");
+
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
  * doesn't check out, or 1 if it's valid.
  */
-static bool
-drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
 	int i;
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
-	if (raw_edid[0] == 0x00) {
+	if (edid_fixup > 8 || edid_fixup < 0)
+		edid_fixup = 6;
+
+	if (block == 0) {
 		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
-		else if (score >= 6) {
+		else if (score >= edid_fixup) {
 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
 			memcpy(raw_edid, edid_header, sizeof(edid_header));
 		} else {
@@ -172,7 +185,9 @@
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
 	if (csum) {
-		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		if (print_bad_edid) {
+			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		}
 
 		/* allow CEA to slide through, switches mangle this */
 		if (raw_edid[0] != 0x02)
@@ -198,23 +213,22 @@
 	return 1;
 
 bad:
-	if (raw_edid) {
+	if (raw_edid && print_bad_edid) {
 		DRM_DEBUG_KMS("Raw EDID:\n");
-		if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
-			for (i = 0; i < EDID_LENGTH; ) {
-				printf("%02x", raw_edid[i]);
-				i++;
-				if (i % 16 == 0 || i == EDID_LENGTH)
-					printf("\n");
-				else if (i % 8 == 0)
-					printf("  ");
-				else
-					printf(" ");
-			}
+		for (i = 0; i < EDID_LENGTH; ) {
+			printf("%02x", raw_edid[i]);
+			i++;
+			if (i % 16 == 0 || i == EDID_LENGTH)
+				printf("\n");
+			else if (i % 8 == 0)
+				printf("  ");
+			else
+				printf(" ");
 		}
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_edid_block_valid);
 
 /**
  * drm_edid_is_valid - sanity check EDID data
@@ -231,13 +245,13 @@
 		return false;
 
 	for (i = 0; i <= edid->extensions; i++)
-		if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
 			return false;
 
 	return true;
 }
+EXPORT_SYMBOL(drm_edid_is_valid);
 
-#define DDC_ADDR 0x50
 #define DDC_SEGMENT_ADDR 0x30
 /**
  * Get EDID information via I2C.
@@ -254,6 +268,8 @@
 		      int block, int len)
 {
 	unsigned char start = block * EDID_LENGTH;
+	unsigned char segment = block >> 1;
+	unsigned char xfers = segment ? 3 : 2;
 	int ret, retries = 5;
 
 	/* The core i2c driver will automatically retry the transfer if the
@@ -265,24 +281,35 @@
 	do {
 		struct iic_msg msgs[] = {
 			{
-				.slave	= DDC_ADDR,
-				.flags	= IIC_M_WR,
+				.slave	= DDC_SEGMENT_ADDR << 1,
+				.flags	= 0,
 				.len	= 1,
+				.buf	= &segment,
+			}, {
+				.slave	= DDC_ADDR << 1,
+				.flags	= 0,
+				.len	= 1,
 				.buf	= &start,
 			}, {
-				.slave	= DDC_ADDR,
+				.slave	= DDC_ADDR << 1,
 				.flags	= IIC_M_RD,
 				.len	= len,
 				.buf	= buf,
 			}
 		};
-		ret = iicbus_transfer(adapter, msgs, 2);
+
+		/*
+		 * Avoid sending the segment address so as not to upset
+		 * non-compliant DDC monitors.
+		 */
+		ret = iicbus_transfer(adapter, &msgs[3 - xfers], xfers);
+
 		if (ret != 0)
 			DRM_DEBUG_KMS("iicbus_transfer countdown %d error %d\n",
 			    retries, ret);
 	} while (ret != 0 && --retries);
 
-	return (ret == 0 ? 0 : -1);
+	return ret == 0 ? 0 : -1;
 }
 
 static bool drm_edid_is_zero(u8 *in_edid, int length)
@@ -293,6 +320,7 @@
 	for (i = 0; i < length / 4; i++)
 		if (*(raw_edid + i) != 0)
 			return false;
+
 	return true;
 }
 
@@ -301,14 +329,16 @@
 {
 	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_DEBUGBITS_KMS);
 
-	block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	if ((block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_NOWAIT)) == NULL)
+		return NULL;
 
 	/* base block fetch */
 	for (i = 0; i < 4; i++) {
 		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(block))
+		if (drm_edid_block_valid(block, 0, print_bad_edid))
 			break;
 		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
 			connector->null_edid_counter++;
@@ -323,7 +353,11 @@
 		return block;
 
 	new = reallocf(block, (block[0x7e] + 1) * EDID_LENGTH, DRM_MEM_KMS,
-	    M_WAITOK);
+	    M_NOWAIT);
+	if (!new) {
+		block = NULL;
+		goto out;
+	}
 	block = new;
 
 	for (j = 1; j <= block[0x7e]; j++) {
@@ -332,14 +366,19 @@
 				  block + (valid_extensions + 1) * EDID_LENGTH,
 				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
 				valid_extensions++;
 				break;
 			}
 		}
-		if (i == 4)
-			DRM_DEBUG_KMS("%s: Ignoring invalid EDID block %d.\n",
-			     drm_get_connector_name(connector), j);
+
+		if (i == 4 && print_bad_edid) {
+			dev_warn(connector->dev->dev,
+			 "%s: Ignoring invalid EDID block %d.\n",
+			 drm_get_connector_name(connector), j);
+
+			connector->bad_edid_counter++;
+		}
 	}
 
 	if (valid_extensions != block[0x7e]) {
@@ -346,16 +385,20 @@
 		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
 		block[0x7e] = valid_extensions;
 		new = reallocf(block, (valid_extensions + 1) * EDID_LENGTH,
-		    DRM_MEM_KMS, M_WAITOK);
+		    DRM_MEM_KMS, M_NOWAIT);
+		if (!new)
+			goto out;
 		block = new;
 	}
 
-	DRM_DEBUG_KMS("got EDID from %s\n", drm_get_connector_name(connector));
 	return block;
 
 carp:
-	DRM_ERROR("%s: EDID block %d invalid.\n",
-	    drm_get_connector_name(connector), j);
+	if (print_bad_edid) {
+		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+			 drm_get_connector_name(connector), j);
+	}
+	connector->bad_edid_counter++;
 
 out:
 	free(block, DRM_MEM_KMS);
@@ -368,7 +411,7 @@
  * \param adapter : i2c device adaptor
  * \return 1 on success
  */
-static bool
+bool
 drm_probe_ddc(device_t adapter)
 {
 	unsigned char out;
@@ -375,6 +418,7 @@
 
 	return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
 }
+EXPORT_SYMBOL(drm_probe_ddc);
 
 /**
  * drm_get_edid - get EDID data, if available
@@ -394,11 +438,9 @@
 	if (drm_probe_ddc(adapter))
 		edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	return edid;
-
 }
+EXPORT_SYMBOL(drm_get_edid);
 
 /*** EDID parsing ***/
 
@@ -432,7 +474,7 @@
 	struct edid_quirk *quirk;
 	int i;
 
-	for (i = 0; i < DRM_ARRAY_SIZE(edid_quirk_list); i++) {
+	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
 		quirk = &edid_quirk_list[i];
 
 		if (edid_vendor(edid, quirk->vendor) &&
@@ -492,24 +534,49 @@
 	preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
 }
 
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+	return (mode->htotal - mode->hdisplay == 160) &&
+	       (mode->hsync_end - mode->hdisplay == 80) &&
+	       (mode->hsync_end - mode->hsync_start == 32) &&
+	       (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-					   int hsize, int vsize, int fresh)
+					   int hsize, int vsize, int fresh,
+					   bool rb)
 {
-	struct drm_display_mode *mode = NULL;
 	int i;
 
 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		struct drm_display_mode *ptr = &drm_dmt_modes[i];
-		if (hsize == ptr->hdisplay &&
-			vsize == ptr->vdisplay &&
-			fresh == drm_mode_vrefresh(ptr)) {
-			/* get the expected default mode */
-			mode = drm_mode_duplicate(dev, ptr);
-			break;
-		}
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+		if (hsize != ptr->hdisplay)
+			continue;
+		if (vsize != ptr->vdisplay)
+			continue;
+		if (fresh != drm_mode_vrefresh(ptr))
+			continue;
+		if (rb != mode_is_rb(ptr))
+			continue;
+
+		return drm_mode_duplicate(dev, ptr);
 	}
-	return mode;
+
+	return NULL;
 }
+EXPORT_SYMBOL(drm_mode_find_dmt);
 
 typedef void detailed_cb(struct detailed_timing *timing, void *closure);
 
@@ -517,25 +584,10 @@
 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
 {
 	int i, n = 0;
-	u8 rev = ext[0x01], d = ext[0x02];
+	u8 d = ext[0x02];
 	u8 *det_base = ext + d;
 
-	switch (rev) {
-	case 0:
-		/* can't happen */
-		return;
-	case 1:
-		/* have to infer how many blocks we have, check pixel clock */
-		for (i = 0; i < 6; i++)
-			if (det_base[18*i] || det_base[18*i+1])
-				n++;
-		break;
-	default:
-		/* explicit count */
-		n = min(ext[0x03] & 0x0f, 6);
-		break;
-	}
-
+	n = (127 - d) / 18;
 	for (i = 0; i < n; i++)
 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
 }
@@ -594,7 +646,7 @@
 drm_monitor_supports_rb(struct edid *edid)
 {
 	if (edid->revision >= 4) {
-		bool ret;
+		bool ret = false;
 		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
 		return ret;
 	}
@@ -751,10 +803,17 @@
 	}
 
 	/* check whether it can be found in default mode table */
-	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+	if (drm_monitor_supports_rb(edid)) {
+		mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+					 true);
+		if (mode)
+			return mode;
+	}
+	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
 	if (mode)
 		return mode;
 
+	/* okay, generate it */
 	switch (timing_level) {
 	case LEVEL_DMT:
 		break;
@@ -768,8 +827,10 @@
 		 * secondary GTF curve.  Please don't do that.
 		 */
 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		if (!mode)
+			return NULL;
 		if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
-			free(mode, DRM_MEM_KMS);
+			drm_mode_destroy(dev, mode);
 			mode = drm_gtf_mode_complex(dev, hsize, vsize,
 						    vrefresh_rate, 0, 0,
 						    drm_gtf2_m(edid),
@@ -814,7 +875,7 @@
 	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
 		return;
 
-	for (i = 0; i < DRM_ARRAY_SIZE(cea_interlaced); i++) {
+	for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
 		if ((mode->hdisplay == cea_interlaced[i].w) &&
 		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
 			mode->vdisplay *= 2;
@@ -851,7 +912,7 @@
 	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
 	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
 	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
-	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
 	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
 
 	/* ignore tiny modes */
@@ -872,16 +933,23 @@
 				"Wrong Hsync/Vsync pulse width\n");
 		return NULL;
 	}
+
+	if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+		mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+		if (!mode)
+			return NULL;
+
+		goto set_size;
+	}
+
 	mode = drm_mode_create(dev);
 	if (!mode)
 		return NULL;
 
-	mode->type = DRM_MODE_TYPE_DRIVER;
-
 	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
-		timing->pixel_clock = htole16(1088);
+		timing->pixel_clock = cpu_to_le16(1088);
 
-	mode->clock = le16toh(timing->pixel_clock) * 10;
+	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
 
 	mode->hdisplay = hactive;
 	mode->hsync_start = mode->hdisplay + hsync_offset;
@@ -901,8 +969,6 @@
 
 	drm_mode_do_interlace_quirk(mode, pt);
 
-	drm_mode_set_name(mode);
-
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
 	}
@@ -912,6 +978,7 @@
 	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
 		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
 	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
 	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -925,20 +992,15 @@
 		mode->height_mm = edid->height_cm * 10;
 	}
 
+	mode->type = DRM_MODE_TYPE_DRIVER;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
+
 	return mode;
 }
 
 static bool
-mode_is_rb(const struct drm_display_mode *mode)
-{
-	return (mode->htotal - mode->hdisplay == 160) &&
-	       (mode->hsync_end - mode->hdisplay == 80) &&
-	       (mode->hsync_end - mode->hsync_start == 32) &&
-	       (mode->vsync_start - mode->vdisplay == 3);
-}
-
-static bool
-mode_in_hsync_range(struct drm_display_mode *mode,
+mode_in_hsync_range(const struct drm_display_mode *mode,
 		    struct edid *edid, u8 *t)
 {
 	int hsync, hmin, hmax;
@@ -955,7 +1017,7 @@
 }
 
 static bool
-mode_in_vsync_range(struct drm_display_mode *mode,
+mode_in_vsync_range(const struct drm_display_mode *mode,
 		    struct edid *edid, u8 *t)
 {
 	int vsync, vmin, vmax;
@@ -987,7 +1049,7 @@
 }
 
 static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	      struct detailed_timing *timing)
 {
 	u32 max_clock;
@@ -1014,12 +1076,26 @@
 	return true;
 }
 
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
 static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			struct detailed_timing *timing)
 {
 	int i, modes = 0;
@@ -1027,7 +1103,8 @@
 	struct drm_device *dev = connector->dev;
 
 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
@@ -1039,17 +1116,112 @@
 	return modes;
 }
 
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express 1366 width which isn't divisible by 8
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
+}
+
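fixup_mode_1366x768() above exists because GTF and CVT compute horizontal timings in 8-pixel character cells, so a 1366-wide panel can only be generated as 1368 and trimmed afterwards. The rounding in isolation:

    #include <stdio.h>

    int main(void)
    {
            int want = 1366, cell = 8;
            int generated = ((want + cell - 1) / cell) * cell;

            /* 1366 % 8 = 6, so the formulas produce 1368 */
            printf("%d -> %d (remainder %d)\n", want, generated, want % cell);
            return 0;
    }
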
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < num_extra_modes; i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	bool rb = drm_monitor_supports_rb(edid);
+
+	for (i = 0; i < num_extra_modes; i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
 static void
 do_inferred_modes(struct detailed_timing *timing, void *c)
 {
 	struct detailed_mode_closure *closure = c;
 	struct detailed_non_pixel *data = &timing->data.other_data;
-	int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct detailed_data_monitor_range *range = &data->data.range;
 
-	if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+	if (data->type != EDID_DETAIL_MONITOR_RANGE)
+		return;
+
+	closure->modes += drm_dmt_modes_for_range(closure->connector,
+						  closure->edid,
+						  timing);
+
+	if (!version_greater(closure->edid, 1, 1))
+		return; /* GTF not defined yet */
+
+	switch (range->flags) {
+	case 0x02: /* secondary gtf, XXX could do more */
+	case 0x00: /* default gtf */
 		closure->modes += drm_gtf_modes_for_range(closure->connector,
 							  closure->edid,
 							  timing);
+		break;
+	case 0x04: /* cvt, only in 1.4+ */
+		if (!version_greater(closure->edid, 1, 3))
+			break;
+
+		closure->modes += drm_cvt_modes_for_range(closure->connector,
+							  closure->edid,
+							  timing);
+		break;
+	case 0x01: /* just the ranges, no formula */
+	default:
+		break;
+	}
 }
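
do_inferred_modes() now dispatches on the range descriptor's flags byte: 0x00 selects the default GTF formula, 0x02 a secondary GTF curve, 0x04 CVT (only meaningful on EDID 1.4 and later), and 0x01 carries bare range limits with no formula. The mapping in miniature (the flag value is made up):

    #include <stdio.h>

    int main(void)
    {
            static const char *formula[] = {
                    [0x00] = "default GTF",
                    [0x01] = "range limits only",
                    [0x02] = "secondary GTF",
                    [0x04] = "CVT (EDID 1.4+)",
            };
            unsigned char flags = 0x04;

            printf("range flags 0x%02x -> %s\n", flags, formula[flags]);
            return 0;
    }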
 
 static int
@@ -1076,14 +1248,14 @@
 	for (i = 0; i < 6; i++) {
 		for (j = 7; j > 0; j--) {
 			m = (i * 8) + (7 - j);
-			if (m >= DRM_ARRAY_SIZE(est3_modes))
+			if (m >= ARRAY_SIZE(est3_modes))
 				break;
 			if (est[i] & (1 << j)) {
 				mode = drm_mode_find_dmt(connector->dev,
 							 est3_modes[m].w,
 							 est3_modes[m].h,
-							 est3_modes[m].r
-							 /*, est3_modes[m].rb */);
+							 est3_modes[m].r,
+							 est3_modes[m].rb);
 				if (mode) {
 					drm_mode_probed_add(connector, mode);
 					modes++;
@@ -1328,9 +1500,12 @@
 
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK	0x01
+#define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
 #define EDID_BASIC_AUDIO	(1 << 6)
+#define EDID_CEA_YCRCB444	(1 << 5)
+#define EDID_CEA_YCRCB422	(1 << 4)
 
 /**
  * Search EDID for CEA extension block.
@@ -1356,22 +1531,134 @@
 
 	return edid_ext;
 }
+EXPORT_SYMBOL(drm_find_cea_extension);
 
+/*
+ * Looks for a CEA mode matching the given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+	const struct drm_display_mode *cea_mode;
+	u8 mode;
+
+	for (mode = 0; mode < drm_num_cea_modes; mode++) {
+		cea_mode = (const struct drm_display_mode *)&edid_cea_modes[mode];
+
+		if (drm_mode_equal(to_match, cea_mode))
+			return mode + 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+	struct drm_device *dev = connector->dev;
+	u8 * mode, cea_mode;
+	int modes = 0;
+
+	for (mode = db; mode < db + len; mode++) {
+		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+		if (cea_mode < drm_num_cea_modes) {
+			struct drm_display_mode *newmode;
+			newmode = drm_mode_duplicate(dev,
+						     &edid_cea_modes[cea_mode]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
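In do_cea_modes(), each byte of a video data block carries a Video ID Code in its low 7 bits (bit 7 flags the sink's native mode), and VICs are 1-based indices into the CEA mode table, which is why the code subtracts one. Decoding a made-up block:

    #include <stdio.h>

    int main(void)
    {
            unsigned char db[] = { 0x90, 4, 31 }; /* native VIC 16, VIC 4, VIC 31 */
            int i;

            for (i = 0; i < 3; i++)
                    printf("VIC %d%s -> table index %d\n", db[i] & 127,
                        (db[i] & 0x80) ? " (native)" : "", (db[i] & 127) - 1);
            return 0;
    }
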
+static int
+cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
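Each CEA data block starts with a header byte holding the tag in bits 7:5 and the payload length in bits 4:0; blocks are packed from offset 4 up to the DTD start offset stored in cea[2]. The for_each_cea_db() macro walks them while refusing any block whose payload would run past the end. A self-contained walk over a hand-built extension block (contents are made up, and the end == 0 special case handled by cea_db_offsets() is skipped):

    #include <stdio.h>

    int main(void)
    {
            unsigned char cea[128] = {
                    0x02, 0x03, 0x0c, 0x00,            /* tag, rev 3, DTD offset */
                    (0x02 << 5) | 3, 16, 4, 31,        /* video block: 3 VICs */
                    (0x01 << 5) | 3, 0x09, 0x07, 0x07, /* audio block: 1 SAD */
            };
            int i, start = 4, end = cea[2];

            for (i = start; i < end && i + (cea[i] & 0x1f) < end;
                i += (cea[i] & 0x1f) + 1)
                    printf("tag %d, payload %d bytes\n",
                        cea[i] >> 5, cea[i] & 0x1f);
            return 0;
    }
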
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+	u8 * cea = drm_find_cea_extension(edid);
+	u8 * db, dbl;
+	int modes = 0;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return 0;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			if (cea_db_tag(db) == VIDEO_BLOCK)
+				modes += do_cea_modes (connector, db+1, dbl);
+		}
+	}
+
+	return modes;
+}
+
 static void
-parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
 {
-	connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+	u8 len = cea_db_payload_len(db);
 
-	connector->dvi_dual = db[6] & 1;
-	connector->max_tmds_clock = db[7] * 5;
+	if (len >= 6) {
+		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+		connector->dvi_dual = db[6] & 1;
+	}
+	if (len >= 7)
+		connector->max_tmds_clock = db[7] * 5;
+	if (len >= 8) {
+		connector->latency_present[0] = db[8] >> 7;
+		connector->latency_present[1] = (db[8] >> 6) & 1;
+	}
+	if (len >= 9)
+		connector->video_latency[0] = db[9];
+	if (len >= 10)
+		connector->audio_latency[0] = db[10];
+	if (len >= 11)
+		connector->video_latency[1] = db[11];
+	if (len >= 12)
+		connector->audio_latency[1] = db[12];
 
-	connector->latency_present[0] = db[8] >> 7;
-	connector->latency_present[1] = (db[8] >> 6) & 1;
-	connector->video_latency[0] = db[9];
-	connector->audio_latency[0] = db[10];
-	connector->video_latency[1] = db[11];
-	connector->audio_latency[1] = db[12];
-
 	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
 		    "max TMDS clock %d, "
 		    "latency present %d %d, "
@@ -1394,6 +1681,21 @@
 		*(u8 **)data = t->data.other_data.data.str.str;
 }
 
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 5)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IDENTIFIER;
+}
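
An HDMI vendor-specific data block is recognized by the IEEE OUI 0x000C03 stored little-endian in its first three payload bytes, and cea_db_is_hdmi_vsdb() additionally insists on the minimum payload of 5 bytes. The check in miniature (the buffer is illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned char db[] = { (0x03 << 5) | 5, 0x03, 0x0c, 0x00, 0x10, 0x00 };
            int oui = db[1] | (db[2] << 8) | (db[3] << 16);

            printf("vendor OUI %06x -> %s\n", oui,
                oui == 0x000c03 ? "HDMI" : "other");
            return 0;
    }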
+
 /**
  * drm_edid_to_eld - build ELD from EDID
  * @connector: connector corresponding to the HDMI/DP sink
@@ -1440,25 +1742,39 @@
 	eld[18] = edid->prod_code[0];
 	eld[19] = edid->prod_code[1];
 
-	for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-		dbl = db[0] & 0x1f;
+	if (cea_revision(cea) >= 3) {
+		int i, start, end;
 
-		switch ((db[0] & 0xe0) >> 5) {
-		case AUDIO_BLOCK:	/* Audio Data Block, contains SADs */
-			sad_count = dbl / 3;
-			memcpy(eld + 20 + mnl, &db[1], dbl);
-			break;
-		case SPEAKER_BLOCK:	/* Speaker Allocation Data Block */
-			eld[7] = db[1];
-			break;
-		case VENDOR_BLOCK:
-			/* HDMI Vendor-Specific Data Block */
-			if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
-				parse_hdmi_vsdb(connector, db);
-			break;
-		default:
-			break;
+		if (cea_db_offsets(cea, &start, &end)) {
+			start = 0;
+			end = 0;
 		}
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			switch (cea_db_tag(db)) {
+			case AUDIO_BLOCK:
+				/* Audio Data Block, contains SADs */
+				sad_count = dbl / 3;
+				if (dbl >= 1)
+					memcpy(eld + 20 + mnl, &db[1], dbl);
+				break;
+			case SPEAKER_BLOCK:
+				/* Speaker Allocation Data Block */
+				if (dbl >= 1)
+					eld[7] = db[1];
+				break;
+			case VENDOR_BLOCK:
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db))
+					parse_hdmi_vsdb(connector, db);
+				break;
+			default:
+				break;
+			}
+		}
 	}
 	eld[5] |= sad_count << 4;
 	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1465,6 +1781,7 @@
 
 	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
 }
+EXPORT_SYMBOL(drm_edid_to_eld);
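
The eld[2] assignment above stores the ELD baseline block size in 4-byte units, rounding the byte count up to the next dword. The same arithmetic with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            int mnl = 7;            /* monitor name length */
            int sad_count = 2;      /* short audio descriptors, 3 bytes each */

            printf("eld[2] = %d\n", (20 + mnl + sad_count * 3 + 3) / 4); /* 9 */
            return 0;
    }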
 
 /**
  * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
@@ -1502,6 +1819,7 @@
 
 	return max(v - a, 0);
 }
+EXPORT_SYMBOL(drm_av_sync_delay);
 
 /**
  * drm_select_eld - select one ELD from multiple HDMI/DP sinks
@@ -1523,6 +1841,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(drm_select_eld);
 
 /**
  * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
@@ -1534,39 +1853,28 @@
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
 	u8 *edid_ext;
-	int i, hdmi_id;
+	int i;
 	int start_offset, end_offset;
-	bool is_hdmi = false;
 
 	edid_ext = drm_find_cea_extension(edid);
 	if (!edid_ext)
-		goto end;
+		return false;
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		return false;
 
 	/*
 	 * Because HDMI identifier is in Vendor Specific Block,
 	 * search it from all data blocks of CEA extension.
 	 */
-	for (i = start_offset; i < end_offset;
-		/* Increased by data block len */
-		i += ((edid_ext[i] & 0x1f) + 1)) {
-		/* Find vendor specific block */
-		if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
-			hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
-				  edid_ext[i + 3] << 16;
-			/* Find HDMI identifier */
-			if (hdmi_id == HDMI_IDENTIFIER)
-				is_hdmi = true;
-			break;
-		}
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+			return true;
 	}
 
-end:
-	return is_hdmi;
+	return false;
 }
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
 /**
  * drm_detect_monitor_audio - check monitor audio capability
@@ -1596,15 +1904,13 @@
 		goto end;
 	}
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		goto end;
 
-	for (i = start_offset; i < end_offset;
-			i += ((edid_ext[i] & 0x1f) + 1)) {
-		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
 			has_audio = true;
-			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+			for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
 				DRM_DEBUG_KMS("CEA audio format %d\n",
 					      (edid_ext[i + j] >> 3) & 0xf);
 			goto end;
@@ -1613,6 +1919,7 @@
 end:
 	return has_audio;
 }
+EXPORT_SYMBOL(drm_detect_monitor_audio);
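
The debug loop in drm_detect_monitor_audio() pulls the CEA audio format code out of bits 6:3 of each short audio descriptor's first byte; bits 2:0 encode the channel count minus one. Decoding one made-up SAD:

    #include <stdio.h>

    int main(void)
    {
            unsigned char sad0 = 0x09;      /* made up: LPCM, stereo */

            printf("CEA audio format %d, %d channels\n",
                (sad0 >> 3) & 0xf, (sad0 & 7) + 1);
            return 0;
    }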
 
 /**
  * drm_add_display_info - pull display info out if present
@@ -1635,13 +1942,29 @@
 	info->bpc = 0;
 	info->color_formats = 0;
 
-	/* Only defined for 1.4 with digital displays */
-	if (edid->revision < 4)
+	if (edid->revision < 3)
 		return;
 
 	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
 		return;
 
+	/* Get data from CEA blocks if present */
+	edid_ext = drm_find_cea_extension(edid);
+	if (edid_ext) {
+		info->cea_rev = edid_ext[1];
+
+		/* The existence of a CEA block should imply RGB support */
+		info->color_formats = DRM_COLOR_FORMAT_RGB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB444)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB422)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+	}
+
+	/* Only defined for 1.4 with digital displays */
+	if (edid->revision < 4)
+		return;
+
 	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
 	case DRM_EDID_DIGITAL_DEPTH_6:
 		info->bpc = 6;
@@ -1667,18 +1990,11 @@
 		break;
 	}
 
-	info->color_formats = DRM_COLOR_FORMAT_RGB444;
-	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
-		info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
-	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
-		info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
-
-	/* Get data from CEA blocks if present */
-	edid_ext = drm_find_cea_extension(edid);
-	if (!edid_ext)
-		return;
-
-	info->cea_rev = edid_ext[1];
+	info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 }
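
drm_add_display_info() now collects color formats from two sources: byte 3 of a CEA extension block, where bit 5 advertises YCbCr 4:4:4 and bit 4 YCbCr 4:2:2 (the block's presence alone implies RGB), and, for EDID 1.4 digital displays, the base block's feature bits, OR-ed in rather than overwriting as the old code did. Decoding the CEA byte standalone (the value is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned char cea3 = 0x30;      /* made up: both YCbCr formats */

            printf("RGB444 always; YCbCr444 %s, YCbCr422 %s\n",
                (cea3 & (1 << 5)) ? "yes" : "no",
                (cea3 & (1 << 4)) ? "yes" : "no");
            return 0;
    }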
 
 /**
@@ -1699,7 +2015,7 @@
 		return 0;
 	}
 	if (!drm_edid_is_valid(edid)) {
-		device_printf(connector->dev->device, "%s: EDID invalid.\n",
+		dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
 			 drm_get_connector_name(connector));
 		return 0;
 	}
@@ -1724,7 +2040,9 @@
 	num_modes += add_cvt_modes(connector, edid);
 	num_modes += add_standard_modes(connector, edid);
 	num_modes += add_established_modes(connector, edid);
-	num_modes += add_inferred_modes(connector, edid);
+	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+		num_modes += add_inferred_modes(connector, edid);
+	num_modes += add_cea_modes(connector, edid);
 
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
@@ -1733,6 +2051,7 @@
 
 	return num_modes;
 }
+EXPORT_SYMBOL(drm_add_edid_modes);
 
 /**
  * drm_add_modes_noedid - add modes for the connectors without EDID
@@ -1759,7 +2078,7 @@
 		vdisplay = 0;
 
 	for (i = 0; i < count; i++) {
-		struct drm_display_mode *ptr = &drm_dmt_modes[i];
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
 		if (hdisplay && vdisplay) {
 			/*
 			 * Only when two are valid, they will be used to check
@@ -1780,3 +2099,23 @@
 	}
 	return num_modes;
 }
+EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, or 0 if it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+	uint8_t i;
+
+	for (i = 0; i < drm_num_cea_modes; i++)
+		if (drm_mode_equal(mode, &edid_cea_modes[i]))
+			return i + 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);

Modified: trunk/sys/dev/drm2/drm_edid.h
===================================================================
--- trunk/sys/dev/drm2/drm_edid.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_edid.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -21,14 +21,11 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_edid.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_edid.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 #ifndef __DRM_EDID_H__
 #define __DRM_EDID_H__
 
-#include <sys/types.h>
-#include <dev/drm2/drmP.h>
-
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
 
@@ -94,12 +91,26 @@
 	u8 min_hfreq_khz;
 	u8 max_hfreq_khz;
 	u8 pixel_clock_mhz; /* need to multiply by 10 */
-	u16 sec_gtf_toggle; /* A000=use above, 20=use below */
-	u8 hfreq_start_khz; /* need to multiply by 2 */
-	u8 c; /* need to divide by 2 */
-	u16 m;
-	u8 k;
-	u8 j; /* need to divide by 2 */
+	u8 flags;
+	union {
+		struct {
+			u8 reserved;
+			u8 hfreq_start_khz; /* need to multiply by 2 */
+			u8 c; /* need to divide by 2 */
+			__le16 m;
+			u8 k;
+			u8 j; /* need to divide by 2 */
+		} __attribute__((packed)) gtf2;
+		struct {
+			u8 version;
+			u8 data1; /* high 6 bits: extra clock resolution */
+			u8 data2; /* plus low 2 of above: max hactive */
+			u8 supported_aspects;
+			u8 flags; /* preferred aspect and blanking support */
+			u8 supported_scalings;
+			u8 preferred_refresh;
+		} __attribute__((packed)) cvt;
+	} formula;
 } __attribute__((packed));
 
 struct detailed_data_wpindex {
@@ -146,7 +157,7 @@
 #define EDID_DETAIL_MONITOR_SERIAL 0xff
 
 struct detailed_timing {
-	u16 pixel_clock; /* need to multiply by 10 KHz */
+	__le16 pixel_clock; /* need to multiply by 10 KHz */
 	union {
 		struct detailed_pixel_timing pixel_data;
 		struct detailed_non_pixel other_data;
@@ -179,6 +190,7 @@
 #define DRM_EDID_FEATURE_DEFAULT_GTF      (1 << 0)
 #define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
 #define DRM_EDID_FEATURE_STANDARD_COLOR   (1 << 2)
+/* If analog */
 #define DRM_EDID_FEATURE_DISPLAY_TYPE     (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
 /* If digital */
 #define DRM_EDID_FEATURE_COLOR_MASK	  (3 << 3)
@@ -241,5 +253,6 @@
 		      struct drm_display_mode *mode);
 struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
 				     struct drm_display_mode *mode);
+int drm_load_edid_firmware(struct drm_connector *connector);
 
 #endif /* __DRM_EDID_H__ */

Modified: trunk/sys/dev/drm2/drm_edid_modes.h
===================================================================
--- trunk/sys/dev/drm2/drm_edid_modes.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_edid_modes.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -23,7 +23,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_edid_modes.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_edid_modes.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 
 #include <dev/drm2/drmP.h>
@@ -32,9 +32,8 @@
 /*
  * Autogenerated from the DMT spec.
  * This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
  */
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
 	/* 640x350 at 85Hz */
 	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
 		   736, 832, 0, 350, 382, 385, 445, 0,
@@ -83,12 +82,16 @@
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
 		   896, 1048, 0, 600, 601, 604, 631, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600 at 120Hz RB */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+		   880, 960, 0, 600, 603, 607, 636, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 848x480 at 60Hz */
 	{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
 		   976, 1088, 0, 480, 486, 494, 517, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1024x768 at 43Hz, interlace */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 772, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -108,10 +111,18 @@
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
 		   1168, 1376, 0, 768, 769, 772, 808, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768 at 120Hz RB */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+		   1104, 1184, 0, 768, 771, 775, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1152x864 at 75Hz */
 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
 		   1344, 1600, 0, 864, 865, 868, 900, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768 at 60Hz RB */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+		   1360, 1440, 0, 768, 771, 778, 790, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1280x768 at 60Hz */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
 		   1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -124,6 +135,14 @@
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
 		   1496, 1712, 0, 768, 771, 778, 809, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768 at 120Hz RB */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+		   1360, 1440, 0, 768, 771, 778, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x800 at 60Hz RB */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+		   1360, 1440, 0, 800, 803, 809, 823, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1280x800 at 60Hz */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
 		   1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -136,6 +155,10 @@
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
 		   1496, 1712, 0, 800, 803, 809, 843, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800 at 120Hz RB */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+		   1360, 1440, 0, 800, 803, 809, 847, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1280x960 at 60Hz */
 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -144,6 +167,10 @@
 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
 		   1504, 1728, 0, 960, 961, 964, 1011, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x960 at 120Hz RB */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+		   1360, 1440, 0, 960, 963, 967, 1017, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1280x1024 at 60Hz */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -156,22 +183,42 @@
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
 		   1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024 at 120Hz RB */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+		   1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1360x768 at 60Hz */
 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
 		   1536, 1792, 0, 768, 771, 777, 795, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1440x1050 at 60Hz */
+	/* 1360x768 at 120Hz RB */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+		   1440, 1520, 0, 768, 771, 776, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1400x1050 at 60Hz RB */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+		   1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1400x1050 at 60Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1440x1050 at 75Hz */
+	/* 1400x1050 at 75Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
 		   1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1440x1050 at 85Hz */
+	/* 1400x1050 at 85Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
 		   1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050 at 120Hz RB */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+		   1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x900 at 60Hz RB */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+		   1520, 1600, 0, 900, 903, 909, 926, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1440x900 at 60Hz */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
 		   1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -184,6 +231,10 @@
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
 		   1696, 1952, 0, 900, 903, 909, 948, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900 at 120Hz RB */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+		   1520, 1600, 0, 900, 903, 909, 953, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1600x1200 at 60Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -204,6 +255,14 @@
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200 at 120Hz RB */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+		   1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1680x1050 at 60Hz RB */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+		   1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1680x1050 at 60Hz */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -216,15 +275,23 @@
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
 		   1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050 at 120Hz RB */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+		   1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1792x1344 at 60Hz */
 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1729x1344 at 75Hz */
+	/* 1792x1344 at 75Hz */
 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
 		   2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1853x1392 at 60Hz */
+	/* 1792x1344 at 120Hz RB */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+		   1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1856x1392 at 60Hz */
 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -232,6 +299,14 @@
 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
 		   2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392 at 120Hz RB */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+		   1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1200 at 60Hz RB */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+		   2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1920x1200 at 60Hz */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -244,6 +319,10 @@
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
 		   2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200 at 120Hz RB */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+		   2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 1920x1440 at 60Hz */
 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -252,6 +331,14 @@
 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
 		   2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440 at 120Hz RB */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+		   2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2560x1600 at 60Hz RB */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+		   2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 2560x1600 at 60Hz */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -264,11 +351,16 @@
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600 at 120Hz RB */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+		   2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+
 };
 static const int drm_num_dmt_modes =
 	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600 at 60Hz */
@@ -305,7 +397,7 @@
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
 		   1184, 1344, 0,  768, 771, 777, 806, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768 at 60Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 776, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768 at 43Hz */
 	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -322,12 +414,14 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864 at 75Hz */
 };
 
-static const struct {
+struct minimode {
 	short w;
 	short h;
 	short r;
 	short rb;
-} est3_modes[] = {
+};
+
+static const struct minimode est3_modes[] = {
 	/* byte 6 */
 	{ 640, 350, 85, 0 },
 	{ 640, 400, 85, 0 },
@@ -379,4 +473,304 @@
 	{ 1920, 1440, 60, 0 },
 	{ 1920, 1440, 75, 0 },
 };
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+	{ 1024, 576,  60, 0 },
+	{ 1366, 768,  60, 0 },
+	{ 1600, 900,  60, 0 },
+	{ 1680, 945,  60, 0 },
+	{ 1920, 1080, 60, 0 },
+	{ 2048, 1152, 60, 0 },
+	{ 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+	/* 1 - 640x480 at 60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 490, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2 - 720x480 at 60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 3 - 720x480 at 60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 4 - 1280x720 at 60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 5 - 1920x1080i at 60Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 6 - 1440x480i at 60Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 7 - 1440x480i at 60Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 8 - 1440x240 at 60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 9 - 1440x240 at 60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 10 - 2880x480i at 60Hz */
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 11 - 2880x480i at 60Hz */
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 12 - 2880x240 at 60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 13 - 2880x240 at 60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 14 - 1440x480 at 60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 15 - 1440x480 at 60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 16 - 1920x1080 at 60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 17 - 720x576 at 50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 18 - 720x576 at 50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 19 - 1280x720 at 50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 20 - 1920x1080i at 50Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 21 - 1440x576i at 50Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 22 - 1440x576i at 50Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 23 - 1440x288 at 50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 24 - 1440x288 at 50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 25 - 2880x576i at 50Hz */
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 26 - 2880x576i at 50Hz */
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 27 - 2880x288 at 50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 28 - 2880x288 at 50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 29 - 1440x576 at 50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 30 - 1440x576 at 50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 31 - 1920x1080 at 50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 32 - 1920x1080 at 24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 33 - 1920x1080 at 25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 34 - 1920x1080 at 30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 35 - 2880x480 at 60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 36 - 2880x480 at 60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 37 - 2880x576 at 50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 38 - 2880x576 at 50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 39 - 1920x1080i at 50Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 40 - 1920x1080i at 100Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 41 - 1280x720 at 100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 42 - 720x576 at 100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 43 - 720x576 at 100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 44 - 1440x576i at 100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 45 - 1440x576i at 100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK) },
+	/* 46 - 1920x1080i at 120Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 47 - 1280x720 at 120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 48 - 720x480 at 120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 49 - 720x480 at 120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 50 - 1440x480i at 120Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 51 - 1440x480i at 120Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 52 - 720x576 at 200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 53 - 720x576 at 200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 54 - 1440x576i at 200Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 55 - 1440x576i at 200Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 56 - 720x480 at 240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 57 - 720x480 at 240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 58 - 1440x480i at 240Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 59 - 1440x480i at 240Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 60 - 1280x720 at 24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 61 - 1280x720 at 25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 62 - 1280x720 at 30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 63 - 1920x1080 at 120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 64 - 1920x1080 at 100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);

Modified: trunk/sys/dev/drm2/drm_fb_helper.c
===================================================================
--- trunk/sys/dev/drm2/drm_fb_helper.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_fb_helper.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -30,196 +30,152 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_fb_helper.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_fb_helper.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm_crtc.h>
 #include <dev/drm2/drm_fb_helper.h>
 #include <dev/drm2/drm_crtc_helper.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 static DRM_LIST_HEAD(kernel_fb_helper_list);
 
-/* simple single crtc case helper function */
-int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_connector *connector;
+#include <sys/kdb.h>
+#include <sys/param.h>
+#include <sys/systm.h>
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_fb_helper_connector *fb_helper_connector;
+struct vt_kms_softc {
+	struct drm_fb_helper	*fb_helper;
+	struct task		 fb_mode_task;
+};
 
-		fb_helper_connector = malloc(
-		    sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS,
-		    M_WAITOK | M_ZERO);
+/* Call restore out of vt(9) locks. */
+static void
+vt_restore_fbdev_mode(void *arg, int pending)
+{
+	struct drm_fb_helper *fb_helper;
+	struct vt_kms_softc *sc;
 
-		fb_helper_connector->connector = connector;
-		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
-	}
-	return 0;
+	sc = (struct vt_kms_softc *)arg;
+	fb_helper = sc->fb_helper;
+	sx_xlock(&fb_helper->dev->mode_config.mutex);
+	drm_fb_helper_restore_fbdev_mode(fb_helper);
+	sx_xunlock(&fb_helper->dev->mode_config.mutex);
 }
 
-const char *fb_mode_option;
-
-/**
- * drm_fb_helper_connector_parse_command_line - parse command line for connector
- * @connector - connector to parse line for
- * @mode_option - per connector mode option
- *
- * This parses the connector specific then generic command lines for
- * modes and options to configure the connector.
- *
- * This uses the same parameters as the fb modedb.c, except for extra
- *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
- *
- * enable/enable Digital/disable bit at the end
- */
-static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
-						       const char *mode_option)
+static int
+vt_kms_postswitch(void *arg)
 {
-	const char *name;
-	unsigned int namelen;
-	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
-	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
-	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
-	int i;
-	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_connector *connector;
+	struct vt_kms_softc *sc;
 
-	if (!fb_helper_conn)
-		return false;
-	connector = fb_helper_conn->connector;
+	sc = (struct vt_kms_softc *)arg;
 
-	cmdline_mode = &fb_helper_conn->cmdline_mode;
-	if (!mode_option)
-		mode_option = fb_mode_option;
+	if (!kdb_active && panicstr == NULL)
+		taskqueue_enqueue_fast(taskqueue_thread, &sc->fb_mode_task);
+	else
+		drm_fb_helper_restore_fbdev_mode(sc->fb_helper);
 
-	if (!mode_option) {
-		cmdline_mode->specified = false;
-		return false;
-	}
+	return (0);
+}
 
-	name = mode_option;
-	namelen = strlen(name);
-	for (i = namelen-1; i >= 0; i--) {
-		switch (name[i]) {
-		case '@':
-			namelen = i;
-			if (!refresh_specified && !bpp_specified &&
-			    !yres_specified) {
-				refresh = strtol(&name[i+1], NULL, 10);
-				refresh_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
-			} else
-				goto done;
-			break;
-		case '-':
-			namelen = i;
-			if (!bpp_specified && !yres_specified) {
-				bpp = strtol(&name[i+1], NULL, 10);
-				bpp_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
-			} else
-				goto done;
-			break;
-		case 'x':
-			if (!yres_specified) {
-				yres = strtol(&name[i+1], NULL, 10);
-				yres_specified = 1;
-			} else
-				goto done;
-		case '0' ... '9':
-			break;
-		case 'M':
-			if (!yres_specified)
-				cvt = 1;
-			break;
-		case 'R':
-			if (cvt)
-				rb = 1;
-			break;
-		case 'm':
-			if (!cvt)
-				margins = 1;
-			break;
-		case 'i':
-			if (!cvt)
-				interlace = 1;
-			break;
-		case 'e':
-			force = DRM_FORCE_ON;
-			break;
-		case 'D':
-			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
-			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
-				force = DRM_FORCE_ON;
-			else
-				force = DRM_FORCE_ON_DIGITAL;
-			break;
-		case 'd':
-			force = DRM_FORCE_OFF;
-			break;
-		default:
-			goto done;
-		}
-	}
-	if (i < 0 && yres_specified) {
-		xres = strtol(name, NULL, 10);
-		res_specified = 1;
-	}
-done:
+struct fb_info *
+framebuffer_alloc()
+{
+	struct fb_info *info;
+	struct vt_kms_softc *sc;
 
-	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-		drm_get_connector_name(connector), xres, yres,
-		(refresh) ? refresh : 60, (rb) ? " reduced blanking" :
-		"", (margins) ? " with margins" : "", (interlace) ?
-		" interlaced" : "");
+	info = malloc(sizeof(*info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
 
-	if (force) {
-		const char *s;
-		switch (force) {
-		case DRM_FORCE_OFF: s = "OFF"; break;
-		case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
-		default:
-		case DRM_FORCE_ON: s = "ON"; break;
-		}
+	sc = malloc(sizeof(*sc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc);
 
-		DRM_INFO("forcing %s connector %s\n",
-			 drm_get_connector_name(connector), s);
-		connector->force = force;
-	}
+	info->fb_priv = sc;
+	info->enter = &vt_kms_postswitch;
 
-	if (res_specified) {
-		cmdline_mode->specified = true;
-		cmdline_mode->xres = xres;
-		cmdline_mode->yres = yres;
-	}
+	return (info);
+}
 
-	if (refresh_specified) {
-		cmdline_mode->refresh_specified = true;
-		cmdline_mode->refresh = refresh;
-	}
+void
+framebuffer_release(struct fb_info *info)
+{
 
-	if (bpp_specified) {
-		cmdline_mode->bpp_specified = true;
-		cmdline_mode->bpp = bpp;
-	}
-	cmdline_mode->rb = rb ? true : false;
-	cmdline_mode->cvt = cvt  ? true : false;
-	cmdline_mode->interlace = interlace ? true : false;
-
-	return true;
+	free(info->fb_priv, DRM_MEM_KMS);
+	free(info, DRM_MEM_KMS);
 }
 
 static int
 fb_get_options(const char *connector_name, char **option)
 {
+	char tunable[64];
 
-	return (1);
+	/*
+	 * A user may use loader tunables to set a specific mode for the
+	 * console. Tunables are read in the following order:
+	 *     1. kern.vt.fb.modes.$connector_name
+	 *     2. kern.vt.fb.default_mode
+	 *
+	 * Example of a mode specific to the LVDS connector:
+	 *     kern.vt.fb.modes.LVDS="1024x768"
+	 *
+	 * Example of a mode applied to all connectors not having a
+	 * connector-specific mode:
+	 *     kern.vt.fb.default_mode="640x480"
+	 */
+	snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s",
+	    connector_name);
+	DRM_INFO("Connector %s: get mode from tunables:\n", connector_name);
+	DRM_INFO("  - %s\n", tunable);
+	DRM_INFO("  - kern.vt.fb.default_mode\n");
+	*option = getenv(tunable);
+	if (*option == NULL)
+		*option = getenv("kern.vt.fb.default_mode");
+
+	return (*option != NULL ? 0 : -ENOENT);
 }
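
fb_get_options() is now backed by loader tunables rather than failing unconditionally. A userland analogue of the lookup order, substituting stdlib getenv() for the kernel environment:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char tunable[64];
            const char *connector_name = "LVDS";    /* illustrative */
            char *option;

            snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s",
                connector_name);
            option = getenv(tunable);
            if (option == NULL)
                    option = getenv("kern.vt.fb.default_mode");
            printf("mode option: %s\n", option ? option : "(none)");
            return 0;
    }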
 
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	int i;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_fb_helper_connector *fb_helper_connector;
+
+		fb_helper_connector = malloc(sizeof(struct drm_fb_helper_connector),
+		    DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+		if (!fb_helper_connector)
+			goto fail;
+
+		fb_helper_connector->connector = connector;
+		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+	}
+	return 0;
+fail:
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		free(fb_helper->connector_info[i], DRM_MEM_KMS);
+		fb_helper->connector_info[i] = NULL;
+	}
+	fb_helper->connector_count = 0;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+
 static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 {
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -226,20 +182,56 @@
 	int i;
 
 	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_cmdline_mode *mode;
+		struct drm_connector *connector;
 		char *option = NULL;
 
 		fb_helper_conn = fb_helper->connector_info[i];
+		connector = fb_helper_conn->connector;
+		mode = &fb_helper_conn->cmdline_mode;
 
 		/* do something on return - turn off connector maybe */
-		if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
+		if (fb_get_options(drm_get_connector_name(connector), &option))
 			continue;
 
-		drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
+		if (drm_mode_parse_command_line_for_connector(option,
+							      connector,
+							      mode)) {
+			if (mode->force) {
+				const char *s;
+				switch (mode->force) {
+				case DRM_FORCE_OFF:
+					s = "OFF";
+					break;
+				case DRM_FORCE_ON_DIGITAL:
+					s = "ON - dig";
+					break;
+				default:
+				case DRM_FORCE_ON:
+					s = "ON";
+					break;
+				}
+
+				DRM_INFO("forcing %s connector %s\n",
+					 drm_get_connector_name(connector), s);
+				connector->force = mode->force;
+			}
+
+			DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+				      drm_get_connector_name(connector),
+				      mode->xres, mode->yres,
+				      mode->refresh_specified ? mode->refresh : 60,
+				      mode->rb ? " reduced blanking" : "",
+				      mode->margins ? " with margins" : "",
+				      mode->interlace ?  " interlaced" : "");
+		}
+
+		freeenv(option);
 	}
 	return 0;
 }
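
The option string found above is handed to drm_mode_parse_command_line_for_connector(), which implements the fb modedb syntax. As a rough sketch, the <xres>x<yres>@<refresh> core of that syntax can be pulled apart like this (the real parser also handles bpp, interlace, margins, and force flags):

    #include <stdio.h>

    int main(void)
    {
            unsigned x, y, r = 60;

            if (sscanf("1280x800@75", "%ux%u@%u", &x, &y, &r) >= 2)
                    printf("%ux%u at %uHz\n", x, y, r);
            return 0;
    }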
 
-#if 0
+#if 0 && defined(FREEBSD_NOTYET)
 static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
 {
 	uint16_t *r_base, *g_base, *b_base;
@@ -257,6 +249,9 @@
 {
 	uint16_t *r_base, *g_base, *b_base;
 
+	if (crtc->funcs->gamma_set == NULL)
+		return;
+
 	r_base = crtc->gamma_store;
 	g_base = r_base + crtc->gamma_size;
 	b_base = g_base + crtc->gamma_size;
@@ -263,9 +258,7 @@
 
 	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
 }
-#endif
 
-#if 0
 int drm_fb_helper_debug_enter(struct fb_info *info)
 {
 	struct drm_fb_helper *helper = info->par;
@@ -295,9 +288,8 @@
 
 	return 0;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
 
-#if 0
 /* Find the real fb for a given fb helper CRTC */
 static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
 {
@@ -311,9 +303,7 @@
 
 	return NULL;
 }
-#endif
 
-#if 0
 int drm_fb_helper_debug_leave(struct fb_info *info)
 {
 	struct drm_fb_helper *helper = info->par;
@@ -343,7 +333,8 @@
 
 	return 0;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+#endif /* FREEBSD_NOTYET */
 
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 {
@@ -351,15 +342,15 @@
 	int i, ret;
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
-		ret = drm_crtc_helper_set_config(mode_set);
+		ret = mode_set->crtc->funcs->set_config(mode_set);
 		if (ret)
 			error = true;
 	}
 	return error;
 }
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
-#if 0
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
@@ -377,20 +368,27 @@
 	}
 	return error;
 }
-#endif
 
-#if 0
+#if 0 && defined(FREEBSD_NOTYET)
 int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
 			void *panic_str)
 {
-	printf("panic occurred, switching back to text console\n");
+	/*
+	 * It's a waste of time and effort to switch back to text console
+	 * if the kernel should reboot before panic messages can be seen.
+	 */
+	if (panic_timeout < 0)
+		return 0;
+
+	pr_err("panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
-	return 0;
 }
+EXPORT_SYMBOL(drm_fb_helper_panic);
 
 static struct notifier_block paniced = {
 	.notifier_call = drm_fb_helper_panic,
 };
+#endif /* FREEBSD_NOTYET */
 
 /**
  * drm_fb_helper_restore - restore the framebuffer console (kernel) config
@@ -404,7 +402,9 @@
 	if (ret == true)
 		DRM_ERROR("Failed to restore crtc configuration\n");
 }
+EXPORT_SYMBOL(drm_fb_helper_restore);
 
+#ifdef __linux__
 #ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
@@ -427,127 +427,64 @@
 #endif
 #endif
 
-#if 0
-static void drm_fb_helper_on(struct fb_info *info)
+#if 0 && defined(FREEBSD_NOTYET)
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 {
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
-	struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_connector *connector;
-	struct drm_encoder *encoder;
 	int i, j;
 
 	/*
-	 * For each CRTC in this fb, turn the crtc on then,
-	 * find all associated encoders and turn them on.
+	 * For each CRTC in this fb, turn the connectors on/off.
 	 */
 	sx_xlock(&dev->mode_config.mutex);
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-		crtc_funcs = crtc->helper_private;
 
 		if (!crtc->enabled)
 			continue;
 
-		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
-		/* Walk the connectors & encoders on this fb turning them on */
+		/* Walk the connectors & encoders on this fb turning them on/off */
 		for (j = 0; j < fb_helper->connector_count; j++) {
 			connector = fb_helper->connector_info[j]->connector;
-			connector->dpms = DRM_MODE_DPMS_ON;
-			drm_connector_property_set_value(connector,
-							 dev->mode_config.dpms_property,
-							 DRM_MODE_DPMS_ON);
+			connector->funcs->dpms(connector, dpms_mode);
+			drm_object_property_set_value(&connector->base,
+				dev->mode_config.dpms_property, dpms_mode);
 		}
-		/* Found a CRTC on this fb, now find encoders */
-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-			if (encoder->crtc == crtc) {
-				struct drm_encoder_helper_funcs *encoder_funcs;
-
-				encoder_funcs = encoder->helper_private;
-				encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-			}
-		}
 	}
 	sx_xunlock(&dev->mode_config.mutex);
 }
-#endif
 
-#if 0
-static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
-{
-	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
-	struct drm_crtc_helper_funcs *crtc_funcs;
-	struct drm_connector *connector;
-	struct drm_encoder *encoder;
-	int i, j;
-
-	/*
-	 * For each CRTC in this fb, find all associated encoders
-	 * and turn them off, then turn off the CRTC.
-	 */
-	sx_xlock(&dev->mode_config.mutex);
-	for (i = 0; i < fb_helper->crtc_count; i++) {
-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-		crtc_funcs = crtc->helper_private;
-
-		if (!crtc->enabled)
-			continue;
-
-		/* Walk the connectors on this fb and mark them off */
-		for (j = 0; j < fb_helper->connector_count; j++) {
-			connector = fb_helper->connector_info[j]->connector;
-			connector->dpms = dpms_mode;
-			drm_connector_property_set_value(connector,
-							 dev->mode_config.dpms_property,
-							 dpms_mode);
-		}
-		/* Found a CRTC on this fb, now find encoders */
-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-			if (encoder->crtc == crtc) {
-				struct drm_encoder_helper_funcs *encoder_funcs;
-
-				encoder_funcs = encoder->helper_private;
-				encoder_funcs->dpms(encoder, dpms_mode);
-			}
-		}
-		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-	}
-	sx_xunlock(&dev->mode_config.mutex);
-}
-#endif
-
-#if 0
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
 	switch (blank) {
 	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
-		drm_fb_helper_on(info);
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
 		break;
 	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
 		break;
 	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
 		break;
 	/* Display: Off; HSync: On, VSync: Off */
 	case FB_BLANK_VSYNC_SUSPEND:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
 		break;
 	/* Display: Off; HSync: Off, VSync: Off */
 	case FB_BLANK_POWERDOWN:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
 		break;
 	}
 	return 0;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_blank);
+#endif /* FREEBSD_NOTYET */
 
 static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
 {
@@ -556,8 +493,11 @@
 	for (i = 0; i < helper->connector_count; i++)
 		free(helper->connector_info[i], DRM_MEM_KMS);
 	free(helper->connector_info, DRM_MEM_KMS);
-	for (i = 0; i < helper->crtc_count; i++)
+	for (i = 0; i < helper->crtc_count; i++) {
 		free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS);
+		if (helper->crtc_info[i].mode_set.mode)
+			drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+	}
 	free(helper->crtc_info, DRM_MEM_KMS);
 }
 
@@ -572,52 +512,64 @@
 
 	INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
 
-	fb_helper->crtc_info = malloc(crtc_count *
-	    sizeof(struct drm_fb_helper_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+	fb_helper->crtc_info = malloc(crtc_count * sizeof(struct drm_fb_helper_crtc),
+	    DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!fb_helper->crtc_info)
+		return -ENOMEM;
 
 	fb_helper->crtc_count = crtc_count;
-	fb_helper->connector_info = malloc(dev->mode_config.num_connector *
-	    sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS,
-	    M_WAITOK | M_ZERO);
+	fb_helper->connector_info = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *),
+	    DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!fb_helper->connector_info) {
+		free(fb_helper->crtc_info, DRM_MEM_KMS);
+		return -ENOMEM;
+	}
 	fb_helper->connector_count = 0;
 
 	for (i = 0; i < crtc_count; i++) {
 		fb_helper->crtc_info[i].mode_set.connectors =
-			malloc(max_conn_count * sizeof(struct drm_connector *),
-			    DRM_MEM_KMS, M_WAITOK | M_ZERO);
+			malloc(max_conn_count *
+				sizeof(struct drm_connector *),
+				DRM_MEM_KMS, M_NOWAIT | M_ZERO);
 
+		if (!fb_helper->crtc_info[i].mode_set.connectors)
+			goto out_free;
 		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
 	}
 
 	i = 0;
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		fb_helper->crtc_info[i].crtc_id = crtc->base.id;
 		fb_helper->crtc_info[i].mode_set.crtc = crtc;
 		i++;
 	}
-	fb_helper->conn_limit = max_conn_count;
+
 	return 0;
+out_free:
+	drm_fb_helper_crtc_free(fb_helper);
+	return -ENOMEM;
 }
+EXPORT_SYMBOL(drm_fb_helper_init);
 
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
+#if 0 && defined(FREEBSD_NOTYET)
 		if (list_empty(&kernel_fb_helper_list)) {
-#if 0
-			printk(KERN_INFO "drm: unregistered panic notifier\n");
+			pr_info("drm: unregistered panic notifier\n");
 			atomic_notifier_chain_unregister(&panic_notifier_list,
 							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-#endif
 		}
+#endif /* FREEBSD_NOTYET */
 	}
 
 	drm_fb_helper_crtc_free(fb_helper);
 
 }
+EXPORT_SYMBOL(drm_fb_helper_fini);
 
-#if 0
+#if 0 && defined(FREEBSD_NOTYET)
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -625,7 +577,7 @@
 	struct drm_framebuffer *fb = fb_helper->fb;
 	int pindex;
 
-	if (info->fix.visual == FB_VISUAL_trueCOLOR) {
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
 		u32 *palette;
 		u32 value;
+		/* place color in pseudopalette */
@@ -681,9 +633,7 @@
 		fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
 	return 0;
 }
-#endif
 
-#if 0
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
@@ -721,9 +671,8 @@
 	}
 	return rc;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
 
-#if 0
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 			    struct fb_info *info)
 {
@@ -814,9 +763,8 @@
 	}
 	return 0;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_check_var);
 
-#if 0
 /* this will let fbcon do the mode init */
 int drm_fb_helper_set_par(struct fb_info *info)
 {
@@ -832,16 +780,16 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->mode_config.mutex);
+	sx_xlock(&dev->mode_config.mutex);
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
 		ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
 		if (ret) {
-			mutex_unlock(&dev->mode_config.mutex);
+			sx_xunlock(&dev->mode_config.mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&dev->mode_config.mutex);
+	sx_xunlock(&dev->mode_config.mutex);
 
 	if (fb_helper->delayed_hotplug) {
 		fb_helper->delayed_hotplug = false;
@@ -849,9 +797,8 @@
 	}
 	return 0;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_set_par);
 
-#if 0
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 			      struct fb_info *info)
 {
@@ -862,7 +809,7 @@
 	int ret = 0;
 	int i;
 
-	mutex_lock(&dev->mode_config.mutex);
+	sx_xlock(&dev->mode_config.mutex);
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
@@ -879,10 +826,11 @@
 			}
 		}
 	}
-	mutex_unlock(&dev->mode_config.mutex);
+	sx_xunlock(&dev->mode_config.mutex);
 	return ret;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_pan_display);
+#endif /* FREEBSD_NOTYET */
 
 int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 				  int preferred_bpp)
@@ -890,11 +838,12 @@
 	int new_fb = 0;
 	int crtc_count = 0;
 	int i;
-#if 0
 	struct fb_info *info;
-#endif
 	struct drm_fb_helper_surface_size sizes;
 	int gamma_size = 0;
+#if defined(__FreeBSD__)
+	device_t kdev;
+#endif
 
 	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
 	sizes.surface_depth = 24;
@@ -904,13 +853,13 @@
 
 	/* if driver picks 8 or 16 by default use that
 	   for both depth/bpp */
-	if (preferred_bpp != sizes.surface_bpp) {
+	if (preferred_bpp != sizes.surface_bpp)
 		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
-	}
+
 	/* first up get a count of crtcs now in use and new min/maxes width/heights */
 	for (i = 0; i < fb_helper->connector_count; i++) {
 		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
-		struct drm_fb_helper_cmdline_mode *cmdline_mode;
+		struct drm_cmdline_mode *cmdline_mode;
 
 		cmdline_mode = &fb_helper_conn->cmdline_mode;
 
@@ -971,136 +920,83 @@
 	if (new_fb < 0)
 		return new_fb;
 
-#if 0
 	info = fb_helper->fbdev;
-#endif
 
 	/* set the fb pointer */
-	for (i = 0; i < fb_helper->crtc_count; i++) {
+	for (i = 0; i < fb_helper->crtc_count; i++)
 		fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-	}
 
-#if 0
+#if defined(__FreeBSD__)
 	if (new_fb) {
+		device_t fbd;
+		int ret;
+
+		kdev = fb_helper->dev->dev;
+		fbd = device_add_child(kdev, "fbd", device_get_unit(kdev));
+		if (fbd != NULL) 
+			ret = device_probe_and_attach(fbd);
+		else
+			ret = ENODEV;
+#ifdef DEV_VT
+		if (ret != 0)
+			DRM_ERROR("Failed to attach fbd device: %d\n", ret);
+#endif
+	}
+#else
+	if (new_fb) {
 		info->var.pixclock = 0;
-		if (register_framebuffer(info) < 0) {
+		if (register_framebuffer(info) < 0)
 			return -EINVAL;
-		}
 
-		printf("fb%d: %s frame buffer device\n", info->node,
-		       info->fix.id);
+		dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+				info->node, info->fix.id);
 
 	} else {
 		drm_fb_helper_set_par(info);
 	}
+#endif
 
+#if 0 && defined(FREEBSD_NOTYET)
 	/* Switch back to kernel console on panic */
 	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		printf("drm: registered panic notifier\n");
+		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &paniced);
+		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
+#endif /* FREEBSD_NOTYET */
 	if (new_fb)
 		list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
-#endif
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
-#if 0
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth)
 {
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_trueCOLOR;
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
+	info->fb_stride = pitch;
 
-	info->fix.line_length = pitch;
 	return;
 }
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
 
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
 	struct drm_framebuffer *fb = fb_helper->fb;
-	info->pseudo_palette = fb_helper->pseudo_palette;
-	info->var.xres_virtual = fb->width;
-	info->var.yres_virtual = fb->height;
-	info->var.bits_per_pixel = fb->bits_per_pixel;
-	info->var.accel_flags = FB_ACCELF_TEXT;
-	info->var.xoffset = 0;
-	info->var.yoffset = 0;
-	info->var.activate = FB_ACTIVATE_NOW;
-	info->var.height = -1;
-	info->var.width = -1;
+	struct vt_kms_softc *sc;
 
-	switch (fb->depth) {
-	case 8:
-		info->var.red.offset = 0;
-		info->var.green.offset = 0;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8; /* 8bit DAC */
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 15:
-		info->var.red.offset = 10;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 5;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 15;
-		info->var.transp.length = 1;
-		break;
-	case 16:
-		info->var.red.offset = 11;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 6;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 0;
-		break;
-	case 24:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 32:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 24;
-		info->var.transp.length = 8;
-		break;
-	default:
-		break;
-	}
+	info->fb_name = device_get_nameunit(fb_helper->dev->dev);
+	info->fb_width = fb->width;
+	info->fb_height = fb->height;
+	info->fb_depth = fb->bits_per_pixel;
 
-	info->var.xres = fb_width;
-	info->var.yres = fb_height;
+	sc = (struct vt_kms_softc *)info->fb_priv;
+	sc->fb_helper = fb_helper;
 }
-#endif
+EXPORT_SYMBOL(drm_fb_helper_fill_var);
 
 static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
 					       uint32_t maxX,
@@ -1134,7 +1030,7 @@
 
 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
 {
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_cmdline_mode *cmdline_mode;
 	cmdline_mode = &fb_connector->cmdline_mode;
 	return cmdline_mode->specified;
 }
@@ -1145,11 +1041,9 @@
 	struct drm_cmdline_mode *cmdline_mode;
 	struct drm_display_mode *mode = NULL;
 
-	cmdline_mode = &fb_helper_conn->cmdline_mode1;
-	if (cmdline_mode->specified == false &&
-	    !drm_fetch_cmdline_mode_from_kenv(fb_helper_conn->connector,
-	    cmdline_mode))
-			return (NULL);
+	cmdline_mode = &fb_helper_conn->cmdline_mode;
+	if (cmdline_mode->specified == false)
+		return mode;
 
 	/* attempt to find a matching mode in the list of modes
 	 *  we have gotten so far, if not add a CVT mode that conforms
@@ -1176,19 +1070,8 @@
 	}
 
 create_mode:
-	if (cmdline_mode->cvt)
-		mode = drm_cvt_mode(fb_helper_conn->connector->dev,
-				    cmdline_mode->xres, cmdline_mode->yres,
-				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-				    cmdline_mode->rb, cmdline_mode->interlace,
-				    cmdline_mode->margins);
-	else
-		mode = drm_gtf_mode(fb_helper_conn->connector->dev,
-				    cmdline_mode->xres, cmdline_mode->yres,
-				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-				    cmdline_mode->interlace,
-				    cmdline_mode->margins);
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+	mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+						 cmdline_mode);
 	list_add(&mode->head, &fb_helper_conn->connector->modes);
 	return mode;
 }
@@ -1197,11 +1080,11 @@
 {
 	bool enable;
 
-	if (strict) {
+	if (strict)
 		enable = connector->status == connector_status_connected;
-	} else {
+	else
 		enable = connector->status != connector_status_disconnected;
-	}
+
 	return enable;
 }
 
@@ -1278,7 +1161,7 @@
 
 	/* try and find a 1024x768 mode on each connector */
 	can_clone = true;
-	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
 
 	for (i = 0; i < fb_helper->connector_count; i++) {
 
@@ -1364,8 +1247,9 @@
 		return best_score;
 
 	crtcs = malloc(dev->mode_config.num_connector *
-	    sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
-	    M_WAITOK | M_ZERO);
+			sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!crtcs)
+		return best_score;
 
 	my_score = 1;
 	if (connector->status == connector_status_connected)
@@ -1385,9 +1269,8 @@
 	for (c = 0; c < fb_helper->crtc_count; c++) {
 		crtc = &fb_helper->crtc_info[c];
 
-		if ((encoder->possible_crtcs & (1 << c)) == 0) {
+		if ((encoder->possible_crtcs & (1 << c)) == 0)
 			continue;
-		}
 
 		for (o = 0; o < n; o++)
 			if (best_crtcs[o] == crtc)
@@ -1424,7 +1307,6 @@
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
 	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
@@ -1435,20 +1317,18 @@
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
 	crtcs = malloc(dev->mode_config.num_connector *
-	    sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
-	    M_WAITOK | M_ZERO);
+			sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
 	modes = malloc(dev->mode_config.num_connector *
-	    sizeof(struct drm_display_mode *), DRM_MEM_KMS,
-	    M_WAITOK | M_ZERO);
+			sizeof(struct drm_display_mode *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
 	enabled = malloc(dev->mode_config.num_connector *
-	    sizeof(bool), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+			  sizeof(bool), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!crtcs || !modes || !enabled) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto out;
+	}
 
+
 	drm_enable_connectors(fb_helper, enabled);
 
 	ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
@@ -1486,6 +1366,7 @@
 		}
 	}
 
+out:
 	free(crtcs, DRM_MEM_KMS);
 	free(modes, DRM_MEM_KMS);
 	free(enabled, DRM_MEM_KMS);
@@ -1493,12 +1374,14 @@
 
 /**
  * drm_fb_helper_initial_config - set up a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
  *
  * LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
  *
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
  * At the moment, this is a cloned configuration across all heads with
  * a new framebuffer object as the backing store.
  *
@@ -1521,20 +1404,35 @@
 	/*
 	 * we shouldn't end up with no modes here.
 	 */
-	if (count == 0) {
-		printf("No connectors reported connected with modes\n");
-	}
+	if (count == 0)
+		dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
 	drm_setup_crtcs(fb_helper);
 
 	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
 }
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ *                               probing all the outputs attached to the fb
+ * @fb_helper: the drm_fb_helper
+ *
+ * LOCKING:
+ * Called at runtime, must take mode config lock.
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after *notification of a change in output configuration.
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
 	int count = 0;
 	u32 max_width, max_height, bpp_sel;
-	bool bound = false, crtcs_bound = false;
+	int bound = 0, crtcs_bound = 0;
 	struct drm_crtc *crtc;
 
 	if (!fb_helper->fb)
@@ -1543,12 +1441,12 @@
 	sx_xlock(&dev->mode_config.mutex);
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (crtc->fb)
-			crtcs_bound = true;
+			crtcs_bound++;
 		if (crtc->fb == fb_helper->fb)
-			bound = true;
+			bound++;
 	}
 
-	if (!bound && crtcs_bound) {
+	if (bound < crtcs_bound) {
 		fb_helper->delayed_hotplug = true;
 		sx_xunlock(&dev->mode_config.mutex);
 		return 0;
@@ -1566,4 +1464,4 @@
 
 	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
 }
-
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);

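For context, the exports above give a KMS driver the complete fbdev bring-up
path: allocate the helper, register every connector, then let the helper
probe an initial framebuffer. A minimal driver-side sketch of that call
sequence, assuming a hypothetical mydrv_fbdev wrapper and a driver-provided
funcs table (neither is part of this change):

static int
mydrv_fbdev_attach(struct drm_device *dev, struct mydrv_fbdev *fbdev)
{
	int ret;

	fbdev->helper.funcs = &mydrv_fb_helper_funcs;

	/* one helper slot per hardware CRTC, a few connectors each */
	ret = drm_fb_helper_init(dev, &fbdev->helper,
	    dev->mode_config.num_crtc, 4);
	if (ret != 0)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	if (ret != 0) {
		drm_fb_helper_fini(&fbdev->helper);
		return ret;
	}

	/* scans CRTCs/connectors and probes a single 32 bpp framebuffer */
	drm_fb_helper_initial_config(&fbdev->helper, 32);
	return 0;
}

On output hotplug the driver only forwards the notification to
drm_fb_helper_hotplug_event(), which re-probes the connectors and keeps the
existing framebuffer when it still fits.
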
Modified: trunk/sys/dev/drm2/drm_fb_helper.h
===================================================================
--- trunk/sys/dev/drm2/drm_fb_helper.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_fb_helper.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -28,7 +28,7 @@
  *      Dave Airlie <airlied at linux.ie>
  *      Jesse Barnes <jesse.barnes at intel.com>
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_fb_helper.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_fb_helper.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 #ifndef DRM_FB_HELPER_H
 #define DRM_FB_HELPER_H
@@ -36,25 +36,10 @@
 struct drm_fb_helper;
 
 struct drm_fb_helper_crtc {
-	uint32_t crtc_id;
 	struct drm_mode_set mode_set;
 	struct drm_display_mode *desired_mode;
 };
 
-/* mode specified on the command line */
-struct drm_fb_helper_cmdline_mode {
-	bool specified;
-	bool refresh_specified;
-	bool bpp_specified;
-	int xres, yres;
-	int bpp;
-	int refresh;
-	bool rb;
-	bool interlace;
-	bool cvt;
-	bool margins;
-};
-
 struct drm_fb_helper_surface_size {
 	u32 fb_width;
 	u32 fb_height;
@@ -75,9 +60,8 @@
 };
 
 struct drm_fb_helper_connector {
-	struct drm_fb_helper_cmdline_mode cmdline_mode;
-	struct drm_cmdline_mode cmdline_mode1;
 	struct drm_connector *connector;
+	struct drm_cmdline_mode cmdline_mode;
 };
 
 struct drm_fb_helper {
@@ -90,7 +74,6 @@
 	int connector_count;
 	struct drm_fb_helper_connector **connector_info;
 	struct drm_fb_helper_funcs *funcs;
-	int conn_limit;
 	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
 	struct list_head kernel_fb_list;
@@ -100,9 +83,6 @@
 	bool delayed_hotplug;
 };
 
-struct fb_var_screeninfo;
-struct fb_cmap;
-
 int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
 				  int preferred_bpp);
 
@@ -111,11 +91,15 @@
 		       int max_conn);
 void drm_fb_helper_fini(struct drm_fb_helper *helper);
 int drm_fb_helper_blank(int blank, struct fb_info *info);
+#ifdef FREEBSD_NOTYET
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 			      struct fb_info *info);
+#endif /* FREEBSD_NOTYET */
 int drm_fb_helper_set_par(struct fb_info *info);
+#ifdef FREEBSD_NOTYET
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 			    struct fb_info *info);
+#endif /* FREEBSD_NOTYET */
 int drm_fb_helper_setcolreg(unsigned regno,
 			    unsigned red,
 			    unsigned green,
@@ -130,7 +114,9 @@
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth);
 
+#ifdef FREEBSD_NOTYET
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+#endif /* FREEBSD_NOTYET */
 
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
 bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
@@ -137,6 +123,5 @@
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
 int drm_fb_helper_debug_enter(struct fb_info *info);
 int drm_fb_helper_debug_leave(struct fb_info *info);
-bool drm_fb_helper_force_kernel_mode(void);
 
 #endif

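With the duplicated drm_fb_helper_cmdline_mode structure gone, each helper
connector now embeds the shared struct drm_cmdline_mode that
drm_mode_parse_command_line_for_connector() fills in. A short sketch of
reading those parsed modes back out (the debug dump itself is illustrative,
not part of this change):

static void
dump_cmdline_modes(struct drm_fb_helper *helper)
{
	int i;

	for (i = 0; i < helper->connector_count; i++) {
		struct drm_fb_helper_connector *fbc = helper->connector_info[i];
		struct drm_cmdline_mode *cm = &fbc->cmdline_mode;

		if (!cm->specified)
			continue;	/* no video= style option parsed */
		DRM_DEBUG_KMS("%s: %dx%d%s@%dHz\n",
		    drm_get_connector_name(fbc->connector),
		    cm->xres, cm->yres,
		    cm->interlace ? "i" : "",
		    cm->refresh_specified ? cm->refresh : 60);
	}
}
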
Modified: trunk/sys/dev/drm2/drm_fops.c
===================================================================
--- trunk/sys/dev/drm2/drm_fops.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_fops.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,16 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Daryll Strauss <daryll at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith at valinux.com
+ *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,51 +33,177 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Daryll Strauss <daryll at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_fops.c 241088 2012-10-01 06:42:07Z hselasky $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_fops.c 295623 2016-02-15 07:35:40Z dumbbell $");
 
-/** @file drm_fops.c
- * Support code for dealing with the file privates associated with each
- * open of the DRM device.
+#include <dev/drm2/drmP.h>
+
+static int drm_open_helper(struct cdev *kdev, int flags, int fmt,
+			   DRM_STRUCTPROC *p, struct drm_device *dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+	int i;
+	int ret;
+
+	if (dev->driver->firstopen) {
+		ret = dev->driver->firstopen(dev);
+		if (ret != 0)
+			return ret;
+	}
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		dev->buf_use = 0;
+		atomic_set(&dev->buf_alloc, 0);
+
+		i = drm_dma_setup(dev);
+		if (i < 0)
+			return i;
+	}
+
+	/*
+	 * FIXME Linux<->FreeBSD: counter incremented in drm_open() and
+	 * reset to 0 here.
+	 */
+#if 0
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+#endif
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->interrupt_flag = 0;
+	dev->dma_flag = 0;
+	dev->last_context = 0;
+	dev->last_switch = 0;
+	dev->last_checked = 0;
+	DRM_INIT_WAITQUEUE(&dev->context_wait);
+	dev->if_version = 0;
+
+#ifdef FREEBSD_NOTYET
+	dev->ctx_start = 0;
+	dev->lck_start = 0;
+
+	dev->buf_async = NULL;
+	DRM_INIT_WAITQUEUE(&dev->buf_readers);
+	DRM_INIT_WAITQUEUE(&dev->buf_writers);
+#endif /* FREEBSD_NOTYET */
+
+	DRM_DEBUG("\n");
+
+	/*
+	 * The kernel's context could be created here, but is now created
+	 * in drm_dma_enqueue.  This is more resource-efficient for
+	 * hardware that does not do DMA, but may mean that
+	 * drm_select_queue fails between the time the interrupt is
+	 * initialized and the time the queues are initialized.
+	 */
+
+	return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., this is the first time the device is opened, then calls setup().
  */
+int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
+{
+	struct drm_device *dev = NULL;
+	struct drm_minor *minor;
+	int retcode = 0;
+	int need_setup = 0;
 
-#include <dev/drm2/drmP.h>
+	minor = kdev->si_drv1;
+	if (!minor)
+		return ENODEV;
 
-/* drm_open_helper is called whenever a process opens /dev/drm. */
-int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
-		    struct drm_device *dev)
+	if (!(dev = minor->dev))
+		return ENODEV;
+
+	sx_xlock(&drm_global_mutex);
+
+	/*
+	 * FIXME Linux<->FreeBSD: On Linux, counter updated outside
+	 * global mutex.
+	 */
+	if (!dev->open_count++)
+		need_setup = 1;
+
+	retcode = drm_open_helper(kdev, flags, fmt, p, dev);
+	if (retcode) {
+		sx_xunlock(&drm_global_mutex);
+		return (-retcode);
+	}
+	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+	if (need_setup) {
+		retcode = drm_setup(dev);
+		if (retcode)
+			goto err_undo;
+	}
+	sx_xunlock(&drm_global_mutex);
+	return 0;
+
+err_undo:
+	mtx_lock(&Giant); /* FIXME: Giant required? */
+	device_unbusy(dev->dev);
+	mtx_unlock(&Giant);
+	dev->open_count--;
+	sx_xunlock(&drm_global_mutex);
+	return -retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and adds it into the doubly linked list in \p dev.
+ */
+static int drm_open_helper(struct cdev *kdev, int flags, int fmt,
+			   DRM_STRUCTPROC *p, struct drm_device *dev)
 {
 	struct drm_file *priv;
-	int retcode;
+	int ret;
 
 	if (flags & O_EXCL)
-		return EBUSY; /* No exclusive opens */
-	dev->flags = flags;
+		return -EBUSY;	/* No exclusive opens */
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;
 
 	DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));
 
 	priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
-	if (priv == NULL) {
-		return ENOMEM;
-	}
+	if (!priv)
+		return -ENOMEM;
 
-	DRM_LOCK(dev);
-	priv->dev		= dev;
-	priv->uid		= p->td_ucred->cr_svuid;
-	priv->pid		= p->td_proc->p_pid;
-	priv->ioctl_count 	= 0;
-
+	priv->uid = p->td_ucred->cr_svuid;
+	priv->pid = p->td_proc->p_pid;
+	priv->minor = kdev->si_drv1;
+	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
-	priv->authenticated	= DRM_SUSER(p);
+	priv->authenticated = DRM_SUSER(p);
+	priv->lock_count = 0;
 
+	INIT_LIST_HEAD(&priv->lhead);
 	INIT_LIST_HEAD(&priv->fbs);
 	INIT_LIST_HEAD(&priv->event_list);
 	priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -74,47 +211,289 @@
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
 
+#ifdef FREEBSD_NOTYET
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_init_file_private(&priv->prime);
+#endif /* FREEBSD_NOTYET */
+
 	if (dev->driver->open) {
-		/* shared code returns -errno */
-		retcode = -dev->driver->open(dev, priv);
-		if (retcode != 0) {
-			free(priv, DRM_MEM_FILES);
+		ret = dev->driver->open(dev, priv);
+		if (ret < 0)
+			goto out_free;
+	}
+
+
+	/* if there is no current master make this fd it */
+	DRM_LOCK(dev);
+	if (!priv->minor->master) {
+		/* create a new master */
+		priv->minor->master = drm_master_create(priv->minor);
+		if (!priv->minor->master) {
 			DRM_UNLOCK(dev);
-			return retcode;
+			ret = -ENOMEM;
+			goto out_free;
 		}
+
+		priv->is_master = 1;
+		/* take another reference for the copy in the local file priv */
+		priv->master = drm_master_get(priv->minor->master);
+
+		priv->authenticated = 1;
+
+		DRM_UNLOCK(dev);
+		if (dev->driver->master_create) {
+			ret = dev->driver->master_create(dev, priv->master);
+			if (ret) {
+				DRM_LOCK(dev);
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				DRM_UNLOCK(dev);
+				goto out_free;
+			}
+		}
+		DRM_LOCK(dev);
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, priv, true);
+			if (ret) {
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				DRM_UNLOCK(dev);
+				goto out_free;
+			}
+		}
+		DRM_UNLOCK(dev);
+	} else {
+		/* get a reference to the master */
+		priv->master = drm_master_get(priv->minor->master);
+		DRM_UNLOCK(dev);
 	}
 
-	/* first opener automatically becomes master */
-	priv->master = TAILQ_EMPTY(&dev->files);
+	DRM_LOCK(dev);
+	list_add(&priv->lhead, &dev->filelist);
+	DRM_UNLOCK(dev);
 
-	TAILQ_INSERT_TAIL(&dev->files, priv, link);
+	mtx_lock(&Giant); /* FIXME: Giant required? */
+	device_busy(dev->dev);
+	mtx_unlock(&Giant);
+
+	ret = devfs_set_cdevpriv(priv, drm_release);
+	if (ret != 0)
+		drm_release(priv);
+
+	return ret;
+      out_free:
+	free(priv, DRM_MEM_FILES);
+	return ret;
+}
+
+static void drm_master_release(struct drm_device *dev, struct drm_file *file_priv)
+{
+
+	if (drm_i_have_hw_lock(dev, file_priv)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  file_priv, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+		drm_lock_free(&file_priv->master->lock,
+			      _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+	}
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e, *et;
+	struct drm_pending_vblank_event *v, *vt;
+	unsigned long flags;
+
+	DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags);
+
+	/* Remove pending flips */
+	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+		if (v->base.file_priv == file_priv) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
+		}
+
+	/* Remove unconsumed events */
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+		e->destroy(e);
+
+	DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and, if it
+ * reaches zero, calls drm_lastclose().
+ */
+void drm_release(void *data)
+{
+	struct drm_file *file_priv = data;
+	struct drm_device *dev = file_priv->minor->dev;
+
+	sx_xlock(&drm_global_mutex);
+
+	DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+	if (dev->driver->preclose)
+		dev->driver->preclose(dev, file_priv);
+
+	/* ========================================================
+	 * Begin inline drm_release
+	 */
+
+	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+		  DRM_CURRENTPID,
+		  (long)file_priv->minor->device,
+		  dev->open_count);
+
+	/* Release any auth tokens that might point to this file_priv,
+	   (do that under the drm_global_mutex) */
+	if (file_priv->magic)
+		(void) drm_remove_magic(file_priv->master, file_priv->magic);
+
+	/* if the master has gone away we can't do anything with the lock */
+	if (file_priv->minor->master)
+		drm_master_release(dev, file_priv);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		drm_core_reclaim_buffers(dev, file_priv);
+
+	drm_events_release(file_priv);
+
+	seldrain(&file_priv->event_poll);
+
+	if (dev->driver->driver_features & DRIVER_MODESET)
+		drm_fb_release(file_priv);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
+#ifdef FREEBSD_NOTYET
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+#endif /* FREEBSD_NOTYET */
+
+	DRM_LOCK(dev);
+
+	if (file_priv->is_master) {
+		struct drm_master *master = file_priv->master;
+		struct drm_file *temp;
+		list_for_each_entry(temp, &dev->filelist, lhead) {
+			if ((temp->master == file_priv->master) &&
+			    (temp != file_priv))
+				temp->authenticated = 0;
+		}
+
+		/**
+		 * Since the master is disappearing, so is the
+		 * possibility of locking.
+		 */
+
+		if (master->lock.hw_lock) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;
+			master->lock.file_priv = NULL;
+			DRM_WAKEUP_INT(&master->lock.lock_queue);
+		}
+
+		if (file_priv->minor->master == file_priv->master) {
+			/* drop the reference held by the minor */
+			if (dev->driver->master_drop)
+				dev->driver->master_drop(dev, file_priv, true);
+			drm_master_put(&file_priv->minor->master);
+		}
+	}
+
+	/* drop the reference held by the file priv */
+	drm_master_put(&file_priv->master);
+	file_priv->is_master = 0;
+	list_del(&file_priv->lhead);
 	DRM_UNLOCK(dev);
-	kdev->si_drv1 = dev;
 
-	retcode = devfs_set_cdevpriv(priv, drm_close);
-	if (retcode != 0)
-		drm_close(priv);
+	if (dev->driver->postclose)
+		dev->driver->postclose(dev, file_priv);
 
-	return (retcode);
+#ifdef FREEBSD_NOTYET
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_destroy_file_private(&file_priv->prime);
+#endif /* FREEBSD_NOTYET */
+
+	free(file_priv, DRM_MEM_FILES);
+
+	/* ========================================================
+	 * End inline drm_release
+	 */
+
+	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+	mtx_lock(&Giant);
+	device_unbusy(dev->dev);
+	mtx_unlock(&Giant);
+	if (!--dev->open_count) {
+		if (atomic_read(&dev->ioctl_count)) {
+			DRM_ERROR("Device busy: %d\n",
+				  atomic_read(&dev->ioctl_count));
+		} else
+			drm_lastclose(dev);
+	}
+	sx_xunlock(&drm_global_mutex);
 }
+EXPORT_SYMBOL(drm_release);
 
 static bool
-drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
-    struct uio *uio, struct drm_pending_event **out)
+drm_dequeue_event(struct drm_file *file_priv, struct uio *uio,
+    struct drm_pending_event **out)
 {
 	struct drm_pending_event *e;
+	bool ret = false;
 
+	/* Already locked in drm_read(). */
+	/* DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags); */
+
+	*out = NULL;
 	if (list_empty(&file_priv->event_list))
-		return (false);
+		goto out;
 	e = list_first_entry(&file_priv->event_list,
-	    struct drm_pending_event, link);
+			     struct drm_pending_event, link);
 	if (e->event->length > uio->uio_resid)
-		return (false);
+		goto out;
 
 	file_priv->event_space += e->event->length;
 	list_del(&e->link);
 	*out = e;
-	return (true);
+	ret = true;
+
+out:
+	/* DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags); */
+	return ret;
 }
 
 int
@@ -123,7 +502,7 @@
 	struct drm_file *file_priv;
 	struct drm_device *dev;
 	struct drm_pending_event *e;
-	int error;
+	ssize_t error;
 
 	error = devfs_get_cdevpriv((void **)&file_priv);
 	if (error != 0) {
@@ -130,6 +509,7 @@
 		DRM_ERROR("can't find authenticator\n");
 		return (EINVAL);
 	}
+
 	dev = drm_get_device_from_kdev(kdev);
 	mtx_lock(&dev->event_lock);
 	while (list_empty(&file_priv->event_list)) {
@@ -142,20 +522,24 @@
 	       if (error != 0)
 		       goto out;
 	}
-	while (drm_dequeue_event(dev, file_priv, uio, &e)) {
+
+	while (drm_dequeue_event(file_priv, uio, &e)) {
 		mtx_unlock(&dev->event_lock);
 		error = uiomove(e->event, e->event->length, uio);
 		CTR3(KTR_DRM, "drm_event_dequeued %d %d %d", curproc->p_pid,
 		    e->event->type, e->event->length);
+
 		e->destroy(e);
 		if (error != 0)
 			return (error);
 		mtx_lock(&dev->event_lock);
 	}
+
 out:
 	mtx_unlock(&dev->event_lock);
 	return (error);
 }
+EXPORT_SYMBOL(drm_read);
 
 void
 drm_event_wakeup(struct drm_pending_event *e)
@@ -164,7 +548,7 @@
 	struct drm_device *dev;
 
 	file_priv = e->file_priv;
-	dev = file_priv->dev;
+	dev = file_priv->minor->dev;
 	mtx_assert(&dev->event_lock, MA_OWNED);
 
 	wakeup(&file_priv->event_space);
@@ -183,6 +567,7 @@
 		DRM_ERROR("can't find authenticator\n");
 		return (EINVAL);
 	}
+
 	dev = drm_get_device_from_kdev(kdev);
 
 	revents = 0;
@@ -199,3 +584,21 @@
 	mtx_unlock(&dev->event_lock);
 	return (revents);
 }
+EXPORT_SYMBOL(drm_poll);
+
+int
+drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+    struct vm_object **obj_res, int nprot)
+{
+	struct drm_device *dev;
+
+	dev = drm_get_device_from_kdev(kdev);
+	if (dev->drm_ttm_bdev != NULL) {
+		return (-ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
+		    obj_res, nprot));
+	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
+		return (-drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
+	} else {
+		return (ENODEV);
+	}
+}

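drm_read() above hands userspace a stream of self-describing struct
drm_event records, each carrying its type and total length, and drm_poll()
wakes readers whenever the per-file event list is non-empty. A hedged
userspace sketch of the consuming side (include paths depend on the libdrm
install; the vblank-only handling is illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <drm.h>	/* struct drm_event, DRM_EVENT_VBLANK */

/* Drain whatever events are currently pending on an open DRM fd. */
static void
drain_drm_events(int fd)
{
	char buf[4096];	/* mirrors the 4k event_space reserved per open */
	struct drm_event ev;
	ssize_t len;
	size_t off;

	len = read(fd, buf, sizeof(buf));
	if (len <= 0)
		return;
	for (off = 0; off + sizeof(ev) <= (size_t)len; off += ev.length) {
		memcpy(&ev, buf + off, sizeof(ev));
		if (ev.length < sizeof(ev))
			break;	/* malformed record; stop rather than spin */
		if (ev.type == DRM_EVENT_VBLANK)
			printf("vblank event, %u bytes\n", ev.length);
	}
}
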
Modified: trunk/sys/dev/drm2/drm_fourcc.h
===================================================================
--- trunk/sys/dev/drm2/drm_fourcc.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_fourcc.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -21,17 +21,15 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_fourcc.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_fourcc.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 
 #ifndef DRM_FOURCC_H
 #define DRM_FOURCC_H
 
-#include <sys/types.h>
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
 
-#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
-				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
-
 #define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
 
 /* color index */
@@ -109,9 +107,10 @@
 #define DRM_FORMAT_NV21		fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
 #define DRM_FORMAT_NV16		fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
 #define DRM_FORMAT_NV61		fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24		fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42		fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
-/* 2 non contiguous plane YCbCr */
-#define DRM_FORMAT_NV12M	fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+/* special NV12 tiled format */
 #define DRM_FORMAT_NV12MT	fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
 /*
@@ -134,7 +133,4 @@
 #define DRM_FORMAT_YUV444	fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
 #define DRM_FORMAT_YVU444	fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
-/* 3 non contiguous plane YCbCr */
-#define DRM_FORMAT_YUV420M	fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
-
 #endif /* DRM_FOURCC_H */

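fourcc_code() simply packs four ASCII bytes in little-endian order, so any
DRM_FORMAT_* value can be checked by hand. A standalone sketch (restating
the macro with <stdint.h> types) verifying the NV24 code added above:

#include <assert.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int
main(void)
{
	/* 'N' = 0x4e, 'V' = 0x56, '2' = 0x32, '4' = 0x34, LSB first */
	assert(fourcc_code('N', 'V', '2', '4') == 0x3432564e);
	return 0;
}
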
Modified: trunk/sys/dev/drm2/drm_gem.c
===================================================================
--- trunk/sys/dev/drm2/drm_gem.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_gem.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_gem.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_gem.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include "opt_vm.h"
 
@@ -55,7 +55,7 @@
  * the faked up offset will fit
  */
 
-#if ULONG_MAX == UINT64_MAX
+#if BITS_PER_LONG == 64
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
 #else
@@ -63,6 +63,10 @@
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
 #endif
 
+/**
+ * Initialize the GEM device fields
+ */
+
 int
 drm_gem_init(struct drm_device *dev)
 {
@@ -69,22 +73,30 @@
 	struct drm_gem_mm *mm;
 
 	drm_gem_names_init(&dev->object_names);
-	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
+
+	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_NOWAIT);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
 	dev->mm_private = mm;
-	if (drm_ht_create(&mm->offset_hash, 19) != 0) {
+
+	if (drm_ht_create(&mm->offset_hash, 19)) {
 		free(mm, DRM_MEM_DRIVER);
-		return (ENOMEM);
+		return -ENOMEM;
 	}
+
 	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
-	return (0);
+
+	return 0;
 }
 
 void
 drm_gem_destroy(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm;
+	struct drm_gem_mm *mm = dev->mm_private;
 
-	mm = dev->mm_private;
 	dev->mm_private = NULL;
 	drm_ht_remove(&mm->offset_hash);
 	delete_unrhdr(mm->idxunr);
@@ -92,11 +104,9 @@
 	drm_gem_names_fini(&dev->object_names);
 }
 
-int
-drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-    size_t size)
+int drm_gem_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
 {
-
 	KASSERT((size & (PAGE_SIZE - 1)) == 0,
 	    ("Bad size %ju", (uintmax_t)size));
 
@@ -108,14 +118,18 @@
 	obj->handle_count = 0;
 	obj->size = size;
 
-	return (0);
+	return 0;
 }
+EXPORT_SYMBOL(drm_gem_object_init);
 
-int
-drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-    size_t size)
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM-provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
 {
-
 	MPASS((size & (PAGE_SIZE - 1)) == 0);
 
 	obj->dev = dev;
@@ -122,262 +136,301 @@
 	obj->vm_obj = NULL;
 
 	obj->refcount = 1;
-	atomic_set(&obj->handle_count, 0);
+	atomic_store_rel_int(&obj->handle_count, 0);
 	obj->size = size;
 
-	return (0);
+	return 0;
 }
+EXPORT_SYMBOL(drm_gem_private_object_init);
 
-
 struct drm_gem_object *
 drm_gem_object_alloc(struct drm_device *dev, size_t size)
 {
 	struct drm_gem_object *obj;
 
-	obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+	obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!obj)
+		goto free;
+
 	if (drm_gem_object_init(dev, obj, size) != 0)
 		goto free;
 
 	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0)
+	    dev->driver->gem_init_object(obj) != 0) {
 		goto dealloc;
-	return (obj);
+	}
+	return obj;
 dealloc:
 	vm_object_deallocate(obj->vm_obj);
 free:
 	free(obj, DRM_MEM_DRIVER);
-	return (NULL);
+	return NULL;
 }
+EXPORT_SYMBOL(drm_gem_object_alloc);
 
-void
-drm_gem_object_free(struct drm_gem_object *obj)
+#if defined(FREEBSD_NOTYET)
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
+	if (obj->import_attach) {
+		drm_prime_remove_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+	}
+	if (obj->export_dma_buf) {
+		drm_prime_remove_buf_handle(&filp->prime,
+				obj->export_dma_buf);
+	}
+}
+#endif
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
 	struct drm_device *dev;
+	struct drm_gem_object *obj;
 
+	obj = drm_gem_names_remove(&filp->object_names, handle);
+	if (obj == NULL) {
+		return -EINVAL;
+	}
 	dev = obj->dev;
-	DRM_LOCK_ASSERT(dev);
-	if (dev->driver->gem_free_object != NULL)
-		dev->driver->gem_free_object(obj);
-}
 
-void
-drm_gem_object_reference(struct drm_gem_object *obj)
-{
+#if defined(FREEBSD_NOTYET)
+	drm_gem_remove_prime_handles(obj, filp);
+#endif
 
-	KASSERT(obj->refcount > 0, ("Dandling obj %p", obj));
-	refcount_acquire(&obj->refcount);
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, filp);
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
 }
+EXPORT_SYMBOL(drm_gem_handle_delete);
 
-void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       u32 *handlep)
 {
+	struct drm_device *dev = obj->dev;
+	int ret;
 
-	if (obj == NULL)
-		return;
-	if (refcount_release(&obj->refcount))
-		drm_gem_object_free(obj);
+	*handlep = 0;
+	ret = drm_gem_name_create(&file_priv->object_names, obj, handlep);
+	if (ret != 0)
+		return ret;
+
+	drm_gem_object_handle_reference(obj);
+
+	if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret) {
+			drm_gem_handle_delete(file_priv, *handlep);
+			return ret;
+		}
+	}
+
+	return 0;
 }
+EXPORT_SYMBOL(drm_gem_handle_create);
 
 void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
-	struct drm_device *dev;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_hash_item *list = &obj->map_list;
 
-	if (obj == NULL)
+	if (!obj->on_map)
 		return;
-	dev = obj->dev;
-	DRM_LOCK(dev);
-	drm_gem_object_unreference(obj);
-	DRM_UNLOCK(dev);
+
+	drm_ht_remove_item(&mm->offset_hash, list);
+	free_unr(mm->idxunr, list->key);
+	obj->on_map = false;
 }
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
-void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	int ret;
 
-	drm_gem_object_reference(obj);
-	atomic_add_rel_int(&obj->handle_count, 1);
-}
+	if (obj->on_map)
+		return 0;
 
-void
-drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-	struct drm_device *dev;
-	struct drm_gem_object *obj1;
+	obj->map_list.key = alloc_unr(mm->idxunr);
+	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		free_unr(mm->idxunr, obj->map_list.key);
+		return ret;
+	}
+	obj->on_map = true;
 
-	dev = obj->dev;
-	if (obj->name != 0) {
-		obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
-		obj->name = 0;
-		drm_gem_object_unreference(obj1);
-	}
+	return 0;
 }
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
-void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      u32 handle)
 {
+	struct drm_gem_object *obj;
 
-	if (obj == NULL ||
-	    atomic_load_acq_int(&obj->handle_count) == 0)
-		return;
+	obj = drm_gem_name_ref(&filp->object_names, handle,
+	    (void (*)(void *))drm_gem_object_reference);
 
-	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
-		drm_gem_object_handle_free(obj);
-	drm_gem_object_unreference(obj);
+	return obj;
 }
+EXPORT_SYMBOL(drm_gem_object_lookup);
 
-void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
+	struct drm_gem_close *args = data;
+	int ret;
 
-	if (obj == NULL ||
-	    atomic_load_acq_int(&obj->handle_count) == 0)
-		return;
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
 
-	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
-		drm_gem_object_handle_free(obj);
-	drm_gem_object_unreference_unlocked(obj);
-}
+	ret = drm_gem_handle_delete(file_priv, args->handle);
 
-int
-drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj,
-    uint32_t *handle)
-{
-	int error;
-
-	error = drm_gem_name_create(&file_priv->object_names, obj, handle);
-	if (error != 0)
-		return (error);
-	drm_gem_object_handle_reference(obj);
-	return (0);
+	return ret;
 }
 
 int
-drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle)
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
+	struct drm_gem_flink *args = data;
 	struct drm_gem_object *obj;
+	int ret;
 
-	obj = drm_gem_names_remove(&file_priv->object_names, handle);
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
-		return (EINVAL);
-	drm_gem_object_handle_unreference_unlocked(obj);
-	return (0);
-}
+		return -ENOENT;
 
-void
-drm_gem_object_release(struct drm_gem_object *obj)
-{
-
-	/*
-	 * obj->vm_obj can be NULL for private gem objects.
-	 */
-	vm_object_deallocate(obj->vm_obj);
+	ret = drm_gem_name_create(&dev->object_names, obj, &obj->name);
+	if (ret != 0) {
+		if (ret == -EALREADY)
+			ret = 0;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+	if (ret == 0)
+		args->name = obj->name;
+	return ret;
 }
 
 int
 drm_gem_open_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+		   struct drm_file *file_priv)
 {
-	struct drm_gem_open *args;
+	struct drm_gem_open *args = data;
 	struct drm_gem_object *obj;
 	int ret;
-	uint32_t handle;
+	u32 handle;
 
-	if (!drm_core_check_feature(dev, DRIVER_GEM))
-		return (ENODEV);
-	args = data;
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
 
 	obj = drm_gem_name_ref(&dev->object_names, args->name,
 	    (void (*)(void *))drm_gem_object_reference);
-	if (obj == NULL)
-		return (ENOENT);
-	handle = 0;
+	if (!obj)
+		return -ENOENT;
+
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
-	if (ret != 0)
-		return (ret);
-	
+	if (ret)
+		return ret;
+
 	args->handle = handle;
 	args->size = obj->size;
 
-	return (0);
+	return 0;
 }
 
 void
-drm_gem_open(struct drm_device *dev, struct drm_file *file_priv)
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 {
 
-	drm_gem_names_init(&file_priv->object_names);
+	drm_gem_names_init(&file_private->object_names);
 }
 
 static int
-drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg)
+drm_gem_object_release_handle(uint32_t name, void *ptr, void *data)
 {
-	struct drm_gem_object *obj;
+	struct drm_file *file_priv = data;
+	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
 
-	obj = ptr;
-	drm_gem_object_handle_unreference(obj);
-	return (0);
+#if defined(FREEBSD_NOTYET)
+	drm_gem_remove_prime_handles(obj, file_priv);
+#endif
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
 }
 
 void
-drm_gem_release(struct drm_device *dev, struct drm_file *file_priv)
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
+	drm_gem_names_foreach(&file_private->object_names,
+	    drm_gem_object_release_handle, file_private);
 
-	drm_gem_names_foreach(&file_priv->object_names,
-	    drm_gem_object_release_handle, NULL);
-	drm_gem_names_fini(&file_priv->object_names);
+	drm_gem_names_fini(&file_private->object_names);
 }
 
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
 {
-	struct drm_gem_close *args;
 
-	if (!drm_core_check_feature(dev, DRIVER_GEM))
-		return (ENODEV);
-	args = data;
-
-	return (drm_gem_handle_delete(file_priv, args->handle));
+	/*
+	 * obj->vm_obj can be NULL for private gem objects.
+	 */
+	vm_object_deallocate(obj->vm_obj);
 }
+EXPORT_SYMBOL(drm_gem_object_release);
 
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+void
+drm_gem_object_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_flink *args;
-	struct drm_gem_object *obj;
-	int error;
+	struct drm_device *dev = obj->dev;
 
-	if (!drm_core_check_feature(dev, DRIVER_GEM))
-		return (ENODEV);
-	args = data;
-
-	obj = drm_gem_name_ref(&file_priv->object_names, args->handle,
-	    (void (*)(void *))drm_gem_object_reference);
-	if (obj == NULL)
-		return (ENOENT);
-	error = drm_gem_name_create(&dev->object_names, obj, &obj->name);
-	if (error != 0) {
-		if (error == EALREADY)
-			error = 0;
-		drm_gem_object_unreference_unlocked(obj);
-	}
-	if (error == 0)
-		args->name = obj->name;
-	return (error);
+	DRM_LOCK_ASSERT(dev);
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
 }
+EXPORT_SYMBOL(drm_gem_object_free);
 
-struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
-    uint32_t handle)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_object *obj1;
 
-	obj = drm_gem_name_ref(&file_priv->object_names, handle,
-	    (void (*)(void *))drm_gem_object_reference);
-	return (obj);
+	if (obj->name) {
+		obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
+		obj->name = 0;
+		drm_gem_object_unreference(obj1);
+	}
 }
 
 static struct drm_gem_object *
@@ -397,66 +450,22 @@
 		    (uintmax_t)offset);
 		return (NULL);
 	}
-	obj = member2struct(drm_gem_object, map_list, map_list);
+	obj = __containerof(map_list, struct drm_gem_object, map_list);
 	return (obj);
 }
 
 int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev;
-	struct drm_gem_mm *mm;
-	int ret;
-
-	if (obj->on_map)
-		return (0);
-	dev = obj->dev;
-	mm = dev->mm_private;
-	ret = 0;
-
-	obj->map_list.key = alloc_unr(mm->idxunr);
-	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
-	if (ret != 0) {
-		DRM_ERROR("failed to add to map hash\n");
-		free_unr(mm->idxunr, obj->map_list.key);
-		return (ret);
-	}
-	obj->on_map = true;
-	return (0);
-}
-
-void
-drm_gem_free_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_hash_item *list;
-	struct drm_gem_mm *mm;
-
-	if (!obj->on_map)
-		return;
-	mm = obj->dev->mm_private;
-	list = &obj->map_list;
-
-	drm_ht_remove_item(&mm->offset_hash, list);
-	free_unr(mm->idxunr, list->key);
-	obj->on_map = false;
-}
-
-int
-drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
     struct vm_object **obj_res, int nprot)
 {
-	struct drm_device *dev;
 	struct drm_gem_object *gem_obj;
 	struct vm_object *vm_obj;
 
-	dev = drm_get_device_from_kdev(kdev);
-	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-		return (ENODEV);
 	DRM_LOCK(dev);
 	gem_obj = drm_gem_object_from_offset(dev, *offset);
 	if (gem_obj == NULL) {
 		DRM_UNLOCK(dev);
-		return (ENODEV);
+		return (-ENODEV);
 	}
 	drm_gem_object_reference(gem_obj);
 	DRM_UNLOCK(dev);
@@ -465,7 +474,7 @@
 	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
 	if (vm_obj == NULL) {
 		drm_gem_object_unreference_unlocked(gem_obj);
-		return (EINVAL);
+		return (-EINVAL);
 	}
 	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
 	*obj_res = vm_obj;

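The reworked drm_gem.c above keeps GEM's two-level naming intact while switching every return to Linux-style negative errno: drm_gem_flink_ioctl() publishes a device-global name for a per-file handle, and drm_gem_open_ioctl() resolves that name back into a fresh handle. A minimal userspace sketch of the round trip, assuming the stock drm.h layouts of struct drm_gem_flink and struct drm_gem_open (the handle itself would come from a driver-specific allocation ioctl, elided here, and the header path may differ per system):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/drm.h>

/* Publish a global name for a GEM handle, then reopen it by name. */
static int
share_and_reopen(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open gopen;

	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) == -1)
		return (-1);	/* e.g. the kernel's -ENODEV or -ENOENT */

	/* Any process that can open the device may now use the name. */
	memset(&gopen, 0, sizeof(gopen));
	gopen.name = flink.name;
	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &gopen) == -1)
		return (-1);

	printf("name %u -> handle %u, size %ju\n",
	    flink.name, gopen.handle, (uintmax_t)gopen.size);
	return (0);
}
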
Modified: trunk/sys/dev/drm2/drm_gem_names.c
===================================================================
--- trunk/sys/dev/drm2/drm_gem_names.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_gem_names.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_gem_names.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_gem_names.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -128,22 +128,40 @@
 	return (arg.res);
 }
 
+void *
+drm_gem_find_ptr(struct drm_gem_names *names, uint32_t name)
+{
+	struct drm_gem_name *n;
+	void *res;
+
+	mtx_lock(&names->lock);
+	LIST_FOREACH(n, gem_name_hash_index(names, name), link) {
+		if (n->name == name) {
+			res = n->ptr;
+			mtx_unlock(&names->lock);
+			return (res);
+		}
+	}
+	mtx_unlock(&names->lock);
+	return (NULL);
+}
+
 int
 drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
 {
 	struct drm_gem_name *np;
 
+	if (*name != 0) {
+		return (-EALREADY);
+	}
+
 	np = malloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK);
 	mtx_lock(&names->lock);
-	if (*name != 0) {
-		mtx_unlock(&names->lock);
-		return (EALREADY);
-	}
 	np->name = alloc_unr(names->unr);
 	if (np->name == -1) {
 		mtx_unlock(&names->lock);
 		free(np, M_GEM_NAMES);
-		return (ENOMEM);
+		return (-ENOMEM);
 	}
 	*name = np->name;
 	np->ptr = p;

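Two details of the drm_gem_names.c changes deserve a note: drm_gem_name_create() now rejects an already-named object before the malloc(), so that path no longer leaks the freshly allocated node, and the new drm_gem_find_ptr() looks a name up without taking a reference. A short fragment contrasting the two lookup flavors (use_object() is a hypothetical consumer; everything else appears in the code above):

struct drm_gem_object *obj;

/*
 * drm_gem_name_ref() runs the ref callback under the table lock, so
 * the object cannot vanish between lookup and use.
 */
obj = drm_gem_name_ref(&dev->object_names, name,
    (void (*)(void *))drm_gem_object_reference);
if (obj != NULL) {
	use_object(obj);		/* hypothetical consumer */
	drm_gem_object_unreference_unlocked(obj);
}

/*
 * drm_gem_find_ptr() returns a borrowed pointer: safe only while some
 * other reference or lock already pins the object's lifetime.
 */
obj = drm_gem_find_ptr(&dev->object_names, name);
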
Modified: trunk/sys/dev/drm2/drm_gem_names.h
===================================================================
--- trunk/sys/dev/drm2/drm_gem_names.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_gem_names.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -27,7 +27,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_gem_names.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_gem_names.h 271816 2014-09-18 20:32:40Z dumbbell $
  *
  */
 
@@ -55,6 +55,7 @@
 void drm_gem_names_init(struct drm_gem_names *names);
 void drm_gem_names_fini(struct drm_gem_names *names);
 uint32_t drm_gem_find_name(struct drm_gem_names *names, void *ptr);
+void *drm_gem_find_ptr(struct drm_gem_names *names, uint32_t name);
 void *drm_gem_name_ref(struct drm_gem_names *names, uint32_t name,
     void (*ref)(void *));
 int drm_gem_name_create(struct drm_gem_names *names, void *obj, uint32_t *name);

Modified: trunk/sys/dev/drm2/drm_global.c
===================================================================
--- trunk/sys/dev/drm2/drm_global.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_global.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /**************************************************************************
  *
  * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
@@ -29,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/drm2/drm_global.c 262988 2014-03-10 23:16:19Z dumbbell $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_global.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm_global.h>
@@ -76,7 +77,11 @@
 	sx_xlock(&item->mutex);
 	if (item->refcount == 0) {
 		item->object = malloc(ref->size, M_DRM_GLOBAL,
-		    M_WAITOK | M_ZERO);
+		    M_NOWAIT | M_ZERO);
+		if (unlikely(item->object == NULL)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 
 		ref->object = item->object;
 		ret = ref->init(ref);
@@ -94,6 +99,7 @@
 	item->object = NULL;
 	return ret;
 }
+EXPORT_SYMBOL(drm_global_item_ref);
 
 void drm_global_item_unref(struct drm_global_reference *ref)
 {
@@ -109,3 +115,4 @@
 	}
 	sx_xunlock(&item->mutex);
 }
+EXPORT_SYMBOL(drm_global_item_unref);


Property changes on: trunk/sys/dev/drm2/drm_global.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
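
With the allocation in drm_global_item_ref() above switched from M_WAITOK to M_NOWAIT, the function can now fail with -ENOMEM instead of sleeping, so callers must check its return value. The item itself is a lazily created, refcounted singleton keyed by global_type. A sketch of typical client usage, modeled on how TTM consumes this API in Linux; the callback bodies and size are placeholders:

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_global.h>

static int
my_global_init(struct drm_global_reference *ref)
{
	/* Placeholder: set up the shared object at ref->object. */
	return (0);
}

static void
my_global_release(struct drm_global_reference *ref)
{
	/* Placeholder: tear down the shared object. */
}

static struct drm_global_reference mem_global_ref = {
	.global_type = DRM_GLOBAL_TTM_MEM,
	.size = 128,		/* sizeof() of the real shared structure */
	.init = my_global_init,
	.release = my_global_release,
};

static int
attach_global(void)
{
	int error;

	/* The first caller allocates ref->size bytes and runs .init;
	 * later callers only gain a reference to the same object. */
	error = drm_global_item_ref(&mem_global_ref);
	if (error != 0)
		return (error);
	/* ... use mem_global_ref.object ... */
	drm_global_item_unref(&mem_global_ref);
	return (0);
}
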
Modified: trunk/sys/dev/drm2/drm_global.h
===================================================================
--- trunk/sys/dev/drm2/drm_global.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_global.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /**************************************************************************
  *
  * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
@@ -27,7 +28,7 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* $FreeBSD$ */
+/* $FreeBSD: stable/10/sys/dev/drm2/drm_global.h 247834 2013-03-05 09:27:21Z kib $ */
 
 #ifndef _DRM_GLOBAL_H_
 #define _DRM_GLOBAL_H_
@@ -54,59 +55,3 @@
 MALLOC_DECLARE(M_DRM_GLOBAL);
 
 #endif
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-/* $FreeBSD$ */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
-	DRM_GLOBAL_TTM_MEM = 0,
-	DRM_GLOBAL_TTM_BO,
-	DRM_GLOBAL_TTM_OBJECT,
-	DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
-	enum drm_global_types global_type;
-	size_t size;
-	void *object;
-	int (*init) (struct drm_global_reference *);
-	void (*release) (struct drm_global_reference *);
-};
-
-extern void drm_global_init(void);
-extern void drm_global_release(void);
-extern int drm_global_item_ref(struct drm_global_reference *ref);
-extern void drm_global_item_unref(struct drm_global_reference *ref);
-
-MALLOC_DECLARE(M_DRM_GLOBAL);
-
-#endif


Property changes on: trunk/sys/dev/drm2/drm_global.h
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/dev/drm2/drm_hashtab.c
===================================================================
--- trunk/sys/dev/drm2/drm_hashtab.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_hashtab.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -28,7 +28,7 @@
  **************************************************************************/
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_hashtab.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_hashtab.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 /*
  * Simple open hash tab implementation.
@@ -55,6 +55,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_create);
 
 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
@@ -70,8 +71,8 @@
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
 
-static struct drm_hash_item *
-drm_ht_find_key(struct drm_open_hash *ht, unsigned long key)
+static struct drm_hash_item *drm_ht_find_key(struct drm_open_hash *ht,
+					  unsigned long key)
 {
 	struct drm_hash_item *entry;
 	struct drm_hash_item_list *h_list;
@@ -113,6 +114,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -141,6 +143,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_just_insert_please);
 
 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 		     struct drm_hash_item **item)
@@ -154,6 +157,7 @@
 	*item = entry;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_find_item);
 
 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 {
@@ -172,6 +176,7 @@
 	LIST_REMOVE(item, head);
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
@@ -180,3 +185,4 @@
 		ht->table = NULL;
 	}
 }
+EXPORT_SYMBOL(drm_ht_remove);

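The hash table above stores intrusive struct drm_hash_item nodes keyed by unsigned long; the GEM mmap-offset lookup earlier in this commit is one consumer, recovering the enclosing object with __containerof(). A minimal sketch of the embed/insert/find/remove cycle, assuming the drm_hashtab.h signatures (the order argument to drm_ht_create() is log2 of the bucket count; my_node is hypothetical):

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_hashtab.h>

struct my_node {			/* hypothetical client structure */
	struct drm_hash_item item;	/* intrusive hash linkage */
	int payload;
};

static int
hash_demo(void)
{
	struct drm_open_hash ht;
	struct drm_hash_item *found;
	struct my_node n = { .item = { .key = 42 }, .payload = 7 };
	struct my_node *np;
	int error;

	error = drm_ht_create(&ht, 8);		/* 2^8 buckets */
	if (error != 0)
		return (error);

	error = drm_ht_insert_item(&ht, &n.item);  /* fails on dup key */
	if (error == 0 && drm_ht_find_item(&ht, 42, &found) == 0) {
		np = __containerof(found, struct my_node, item);
		/* np->payload == 7 here */
		drm_ht_remove_item(&ht, &n.item);
	}
	drm_ht_remove(&ht);			/* frees the bucket array */
	return (error);
}
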
Modified: trunk/sys/dev/drm2/drm_hashtab.h
===================================================================
--- trunk/sys/dev/drm2/drm_hashtab.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_hashtab.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -28,7 +28,7 @@
  **************************************************************************/
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_hashtab.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_hashtab.h 235783 2012-05-22 11:07:44Z kib $");
 
 /*
  * Simple open hash tab implementation.

Modified: trunk/sys/dev/drm2/drm_ioc32.c
===================================================================
--- trunk/sys/dev/drm2/drm_ioc32.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_ioc32.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (C) Paul Mackerras 2005.
  * All Rights Reserved.
@@ -20,13 +21,10 @@
  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
- *
- * Authors:
- *    Paul Mackerras <paulus at samba.org>
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_ioc32.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include "opt_compat.h"
 
@@ -35,10 +33,6 @@
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
 
-/** @file drm_ioc32.c
- * 32-bit ioctl compatibility routines for the DRM.
- */
- 
 #define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
 #define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
 #define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
@@ -87,7 +81,8 @@
 	u32 desc;		  /**< User-space buffer to hold desc */
 } drm_version32_t;
 
-static int compat_drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_version(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_version32_t *v32 = data;
 	struct drm_version version;
@@ -99,7 +94,7 @@
 	version.date = (void *)(unsigned long)v32->date;
 	version.desc_len = v32->desc_len;
 	version.desc = (void *)(unsigned long)v32->desc;
-	
+
 	err = drm_version(dev, (void *)&version, file_priv);
 	if (err)
 		return err;
@@ -119,7 +114,8 @@
 	u32 unique;	/**< Unique name for driver instantiation */
 } drm_unique32_t;
 
-static int compat_drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_getunique(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_unique32_t *uq32 = data;
 	struct drm_unique u;
@@ -137,7 +133,8 @@
 	return 0;
 }
 
-static int compat_drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_setunique(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_unique32_t *uq32 = data;
 	struct drm_unique u;
@@ -157,7 +154,8 @@
 	int mtrr;		/**< MTRR slot used */
 } drm_map32_t;
 
-static int compat_drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_getmap(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_map32_t *m32 = data;
 	struct drm_map map;
@@ -183,13 +181,14 @@
 
 }
 
-static int compat_drm_addmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_addmap(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_map32_t *m32 = data;
 	struct drm_map map;
 	int err;
 	void *handle;
-	
+
 	map.offset = (unsigned long)m32->offset;
 	map.size = (unsigned long)m32->size;
 	map.type = m32->type;
@@ -202,7 +201,7 @@
 	m32->offset = map.offset;
 	m32->mtrr = map.mtrr;
 	handle = map.handle;
-	
+
 	m32->handle = (unsigned long)handle;
 	if (m32->handle != (unsigned long)handle)
 		DRM_DEBUG("compat_drm_addmap truncated handle"
@@ -212,7 +211,8 @@
 	return 0;
 }
 
-static int compat_drm_rmmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_rmmap(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_map32_t *m32 = data;
 	struct drm_map map;
@@ -231,7 +231,8 @@
 	u32 iocs;	/**< Ioctl count */
 } drm_client32_t;
 
-static int compat_drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_getclient(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_client32_t *c32 = data;
 	struct drm_client client;
@@ -261,7 +262,8 @@
 	} data[15];
 } drm_stats32_t;
 
-static int compat_drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_getstats(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_stats32_t *s32 = data;
 	struct drm_stats stats;
@@ -289,7 +291,8 @@
 	u32 agp_start;		 /**< Start address in the AGP aperture */
 } drm_buf_desc32_t;
 
-static int compat_drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_addbufs(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_buf_desc32_t *b32 = data;
 	struct drm_buf_desc buf;
@@ -312,11 +315,12 @@
 	b32->high_mark = buf.high_mark;
 	b32->flags = buf.flags;
 	b32->agp_start = buf.agp_start;
-	
+
 	return 0;
 }
 
-static int compat_drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_markbufs(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_buf_desc32_t *b32 = data;
 	struct drm_buf_desc buf;
@@ -324,7 +328,7 @@
 	buf.size = b32->size;
 	buf.low_mark = b32->low_mark;
 	buf.high_mark = b32->high_mark;
-	
+
 	return drm_markbufs(dev, (void *)&buf, file_priv);
 }
 
@@ -333,7 +337,8 @@
 	u32 list;
 } drm_buf_info32_t;
 
-static int compat_drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_infobufs(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_buf_info32_t *req32 = data;
 	drm_buf_desc32_t *to;
@@ -351,7 +356,7 @@
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
 	request = malloc(nbytes, DRM_MEM_BUFLISTS, M_ZERO | M_NOWAIT);
 	if (!request)
-		return -EFAULT;
+		return -ENOMEM;
 	list = (struct drm_buf_desc *) (request + 1);
 
 	request->count = count;
@@ -389,7 +394,8 @@
 	u32 list;		/**< Buffer information */
 } drm_buf_map32_t;
 
-static int compat_drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_mapbufs(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_buf_map32_t *req32 = data;
 	drm_buf_pub32_t *list32;
@@ -407,7 +413,7 @@
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
 	request = malloc(nbytes, DRM_MEM_BUFLISTS, M_ZERO | M_NOWAIT);
 	if (!request)
-		return -EFAULT;
+		return -ENOMEM;
 	list = (struct drm_buf_pub *) (request + 1);
 
 	request->count = count;
@@ -437,7 +443,8 @@
 	u32 list;
 } drm_buf_free32_t;
 
-static int compat_drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_freebufs(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_buf_free32_t *req32 = data;
 	struct drm_buf_free request;
@@ -453,7 +460,8 @@
 	u32 handle;		/**< Handle of map */
 } drm_ctx_priv_map32_t;
 
-static int compat_drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_setsareactx(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_ctx_priv_map32_t *req32 = data;
 	struct drm_ctx_priv_map request;
@@ -464,7 +472,8 @@
 	return drm_setsareactx(dev, (void *)&request, file_priv);
 }
 
-static int compat_drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_getsareactx(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_ctx_priv_map32_t *req32 = data;
 	struct drm_ctx_priv_map request;
@@ -486,7 +495,8 @@
 	u32 contexts;
 } drm_ctx_res32_t;
 
-static int compat_drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_resctx(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_ctx_res32_t *res32 = data;
 	struct drm_ctx_res res;
@@ -517,12 +527,18 @@
 	int granted_count;	  /**< Number of buffers granted */
 } drm_dma32_t;
 
-static int compat_drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_dma(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_dma32_t *d32 = data;
 	struct drm_dma d;
 	int err;
 
+	if (!dev->driver->dma_ioctl) {
+		DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
+		return -EINVAL;
+	}
+
 	d.context = d32->context;
 	d.send_count = d32->send_count;
 	d.send_indices = (int *)(unsigned long)d32->send_indices;
@@ -532,7 +548,7 @@
 	d.request_indices = (int *)(unsigned long)d32->request_indices;
 	d.request_sizes = (int *)(unsigned long)d32->request_sizes;
 
-	err = drm_dma(dev, (void *)&d, file_priv);
+	err = dev->driver->dma_ioctl(dev, (void *)&d, file_priv);
 	if (err)
 		return err;
 
@@ -542,11 +558,13 @@
 	return 0;
 }
 
+#if __OS_HAS_AGP
 typedef struct drm_agp_mode32 {
 	u32 mode;	/**< AGP mode */
 } drm_agp_mode32_t;
 
-static int compat_drm_agp_enable(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_enable(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_mode32_t *m32 = data;
 	struct drm_agp_mode mode;
@@ -570,7 +588,8 @@
 	unsigned short id_device;
 } drm_agp_info32_t;
 
-static int compat_drm_agp_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_info(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_info32_t *i32 = data;
 	struct drm_agp_info info;
@@ -600,7 +619,8 @@
 	u32 physical;	/**< Physical used by i810 */
 } drm_agp_buffer32_t;
 
-static int compat_drm_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_alloc(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_buffer32_t *req32 = data;
 	struct drm_agp_buffer request;
@@ -619,7 +639,8 @@
 	return 0;
 }
 
-static int compat_drm_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_free(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_buffer32_t *req32 = data;
 	struct drm_agp_buffer request;
@@ -634,7 +655,8 @@
 	u32 offset;	/**< In bytes -- will round to page boundary */
 } drm_agp_binding32_t;
 
-static int compat_drm_agp_bind(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_bind(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_binding32_t *req32 = data;
 	struct drm_agp_binding request;
@@ -645,15 +667,17 @@
 	return drm_agp_bind_ioctl(dev, (void *)&request, file_priv);
 }
 
-static int compat_drm_agp_unbind(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_agp_unbind(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_agp_binding32_t *req32 = data;
 	struct drm_agp_binding request;
-	
+
 	request.handle = req32->handle;
-	
+
 	return drm_agp_unbind_ioctl(dev, (void *)&request, file_priv);
 }
+#endif				/* __OS_HAS_AGP */
 
 typedef struct drm_scatter_gather32 {
 	u32 size;	/**< In bytes -- will round to page boundary */
@@ -660,7 +684,8 @@
 	u32 handle;	/**< Used for mapping / unmapping */
 } drm_scatter_gather32_t;
 
-static int compat_drm_sg_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_sg_alloc(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_scatter_gather32_t *req32 = data;
 	struct drm_scatter_gather request;
@@ -678,7 +703,8 @@
 	return 0;
 }
 
-static int compat_drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_sg_free(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_scatter_gather32_t *req32 = data;
 	struct drm_scatter_gather request;
@@ -688,6 +714,7 @@
 	return drm_sg_free(dev, (void *)&request, file_priv);
 }
 
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 typedef struct drm_update_draw32 {
 	drm_drawable_t handle;
 	unsigned int type;
@@ -695,22 +722,8 @@
 	/* 64-bit version has a 32-bit pad here */
 	u64 data;	/**< Pointer */
 } __attribute__((packed)) drm_update_draw32_t;
+#endif
 
-static int compat_drm_update_draw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	drm_update_draw32_t *update32 = data;
-	struct drm_update_draw request;
-	int err;
-
-	request.handle = update32->handle;
-	request.type = update32->type;
-	request.num = update32->num;
-	request.data = update32->data;
-
-	err = drm_update_draw(dev, (void *)&request, file_priv);
-	return err;
-}
-
 struct drm_wait_vblank_request32 {
 	enum drm_vblank_seq_type type;
 	unsigned int sequence;
@@ -729,7 +742,8 @@
 	struct drm_wait_vblank_reply32 reply;
 } drm_wait_vblank32_t;
 
-static int compat_drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int compat_drm_wait_vblank(struct drm_device *dev, void *data,
+    struct drm_file *file_priv)
 {
 	drm_wait_vblank32_t *req32 = data;
 	union drm_wait_vblank request;
@@ -751,12 +765,12 @@
 	return 0;
 }
 
-drm_ioctl_desc_t drm_compat_ioctls[256] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION32, compat_drm_version, 0),
+struct drm_ioctl_desc drm_compat_ioctls[256] = {
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION32, compat_drm_version, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE32, compat_drm_getunique, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP32, compat_drm_getmap, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT32, compat_drm_getclient, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS32, compat_drm_getstats, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP32, compat_drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT32, compat_drm_getclient, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS32, compat_drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE32, compat_drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP32, compat_drm_addmap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS32, compat_drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -769,7 +783,7 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX32, compat_drm_getsareactx, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX32, compat_drm_resctx, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_DMA32, compat_drm_dma, DRM_AUTH),
-	
+#if __OS_HAS_AGP
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE32, compat_drm_agp_enable, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO32, compat_drm_agp_info, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC32, compat_drm_agp_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -776,12 +790,12 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE32, compat_drm_agp_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND32, compat_drm_agp_bind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND32, compat_drm_agp_unbind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	
+#endif
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC32, compat_drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE32, compat_drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	
-	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW32, compat_drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW32, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK32, compat_drm_wait_vblank, DRM_UNLOCKED),
 };
 


Property changes on: trunk/sys/dev/drm2/drm_ioc32.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
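
Every compat handler in drm_ioc32.c follows the same thunk shape: a 32-bit mirror struct whose pointer members are u32, widened into the native struct, passed to the native handler, and narrowed back. A schematic of that shape with a hypothetical drm_foo request (the real registrations are in the drm_compat_ioctls table above):

typedef struct drm_foo32 {		/* hypothetical 32-bit mirror */
	u32 count;
	u32 list;			/* 32-bit user pointer */
} drm_foo32_t;

static int compat_drm_foo(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	drm_foo32_t *req32 = data;
	struct drm_foo request;		/* hypothetical native struct */
	int err;

	/* Widen: 32-bit user addresses become native pointers. */
	request.count = req32->count;
	request.list = (void *)(unsigned long)req32->list;

	err = drm_foo(dev, (void *)&request, file_priv);
	if (err)
		return err;

	/* Narrow results back into the 32-bit layout. */
	req32->count = request.count;
	return 0;
}
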
Modified: trunk/sys/dev/drm2/drm_ioctl.c
===================================================================
--- trunk/sys/dev/drm2/drm_ioctl.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_ioctl.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith at valinux.com
+ *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,163 +32,173 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_ioctl.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_ioctl.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_ioctl.c
- * Varios minor DRM ioctls not applicable to other files, such as versioning
- * information and reporting DRM information to userland.
- */
-
 #include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_core.h>
 
-/*
- * Beginning in revision 1.1 of the DRM interface, getunique will return
- * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
- * before setunique has been called.  The format for the bus-specific part of
- * the unique is not defined for any other bus.
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
  */
 int drm_getunique(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
 	struct drm_unique *u = data;
+	struct drm_master *master = file_priv->master;
 
-	if (u->unique_len >= dev->unique_len) {
-		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
-			return EFAULT;
+	if (u->unique_len >= master->unique_len) {
+		if (copy_to_user(u->unique, master->unique, master->unique_len))
+			return -EFAULT;
 	}
-	u->unique_len = dev->unique_len;
+	u->unique_len = master->unique_len;
 
 	return 0;
 }
 
-/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
- * requested version 1.1 or greater.
+static void
+drm_unset_busid(struct drm_device *dev,
+		struct drm_master *master)
+{
+
+	free(master->unique, DRM_MEM_DRIVER);
+	master->unique = NULL;
+	master->unique_len = 0;
+	master->unique_size = 0;
+}
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
  */
 int drm_setunique(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
 	struct drm_unique *u = data;
-	int domain, bus, slot, func, ret;
-	char *busid;
+	struct drm_master *master = file_priv->master;
+	int ret;
 
-	/* Check and copy in the submitted Bus ID */
+	if (master->unique_len || master->unique)
+		return -EBUSY;
+
 	if (!u->unique_len || u->unique_len > 1024)
-		return EINVAL;
+		return -EINVAL;
 
-	busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK);
-	if (busid == NULL)
-		return ENOMEM;
+	if (!dev->driver->bus->set_unique)
+		return -EINVAL;
 
-	if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
-		free(busid, DRM_MEM_DRIVER);
-		return EFAULT;
-	}
-	busid[u->unique_len] = '\0';
+	ret = dev->driver->bus->set_unique(dev, master, u);
+	if (ret)
+		goto err;
 
-	/* Return error if the busid submitted doesn't match the device's actual
-	 * busid.
-	 */
-	ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
-	if (ret != 3) {
-		free(busid, DRM_MEM_DRIVER);
-		return EINVAL;
-	}
-	domain = bus >> 8;
-	bus &= 0xff;
-	
-	if ((domain != dev->pci_domain) ||
-	    (bus != dev->pci_bus) ||
-	    (slot != dev->pci_slot) ||
-	    (func != dev->pci_func)) {
-		free(busid, DRM_MEM_DRIVER);
-		return EINVAL;
-	}
+	return 0;
 
-	/* Actually set the device's busid now. */
-	DRM_LOCK(dev);
-	if (dev->unique_len || dev->unique) {
-		DRM_UNLOCK(dev);
-		return EBUSY;
-	}
-
-	dev->unique_len = u->unique_len;
-	dev->unique = busid;
-	DRM_UNLOCK(dev);
-
-	return 0;
+err:
+	drm_unset_busid(dev, master);
+	return ret;
 }
 
-
-static int
-drm_set_busid(struct drm_device *dev)
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 {
+	struct drm_master *master = file_priv->master;
+	int ret;
 
-	DRM_LOCK(dev);
+	if (master->unique != NULL)
+		drm_unset_busid(dev, master);
 
-	if (dev->unique != NULL) {
-		DRM_UNLOCK(dev);
-		return EBUSY;
-	}
-
-	dev->unique_len = 20;
-	dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT);
-	if (dev->unique == NULL) {
-		DRM_UNLOCK(dev);
-		return ENOMEM;
-	}
-
-	snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
-	    dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
-
-	DRM_UNLOCK(dev);
-
+	ret = dev->driver->bus->set_busid(dev, master);
+	if (ret)
+		goto err;
 	return 0;
+err:
+	drm_unset_busid(dev, master);
+	return ret;
 }
 
-int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace.
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
 {
-	struct drm_map     *map = data;
-	drm_local_map_t    *mapinlist;
-	int          idx;
-	int	     i = 0;
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
 
 	idx = map->offset;
+	if (idx < 0)
+		return -EINVAL;
 
+	i = 0;
 	DRM_LOCK(dev);
-	if (idx < 0) {
-		DRM_UNLOCK(dev);
-		return EINVAL;
-	}
-
-	TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
+	list_for_each(list, &dev->maplist) {
 		if (i == idx) {
-			map->offset = mapinlist->offset;
-			map->size   = mapinlist->size;
-			map->type   = mapinlist->type;
-			map->flags  = mapinlist->flags;
-			map->handle = mapinlist->handle;
-			map->mtrr   = mapinlist->mtrr;
+			r_list = list_entry(list, struct drm_map_list, head);
 			break;
 		}
 		i++;
 	}
+	if (!r_list || !r_list->map) {
+		DRM_UNLOCK(dev);
+		return -EINVAL;
+	}
 
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = r_list->map->mtrr;
 	DRM_UNLOCK(dev);
 
- 	if (mapinlist == NULL)
-		return EINVAL;
-
 	return 0;
 }
 
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace.
+ */
 int drm_getclient(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
@@ -185,52 +205,64 @@
 	struct drm_client *client = data;
 	struct drm_file *pt;
 	int idx;
-	int i = 0;
+	int i;
 
 	idx = client->idx;
+	i = 0;
+
 	DRM_LOCK(dev);
-	TAILQ_FOREACH(pt, &dev->files, link) {
-		if (i == idx) {
-			client->auth  = pt->authenticated;
-			client->pid   = pt->pid;
-			client->uid   = pt->uid;
+	list_for_each_entry(pt, &dev->filelist, lhead) {
+		if (i++ >= idx) {
+			client->auth = pt->authenticated;
+			client->pid = pt->pid;
+			client->uid = pt->uid;
 			client->magic = pt->magic;
-			client->iocs  = pt->ioctl_count;
+			client->iocs = pt->ioctl_count;
 			DRM_UNLOCK(dev);
+
 			return 0;
 		}
-		i++;
 	}
 	DRM_UNLOCK(dev);
 
-	return EINVAL;
+	return -EINVAL;
 }
 
-int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
 {
 	struct drm_stats *stats = data;
-	int          i;
+	int i;
 
-	memset(stats, 0, sizeof(struct drm_stats));
-	
-	DRM_LOCK(dev);
+	memset(stats, 0, sizeof(*stats));
 
 	for (i = 0; i < dev->counters; i++) {
 		if (dev->types[i] == _DRM_STAT_LOCK)
 			stats->data[i].value =
-			    (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
-		else 
+			    (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+		else
 			stats->data[i].value = atomic_read(&dev->counts[i]);
 		stats->data[i].type = dev->types[i];
 	}
-	
+
 	stats->count = dev->counters;
 
-	DRM_UNLOCK(dev);
-
 	return 0;
 }
 
+/**
+ * Get device/driver capabilities
+ */
 int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_get_cap *req = data;
@@ -250,72 +282,81 @@
 	case DRM_CAP_DUMB_PREFER_SHADOW:
 		req->value = dev->mode_config.prefer_shadow;
 		break;
+	case DRM_CAP_PRIME:
+		req->value |= false /* XXXKIB dev->driver->prime_fd_to_handle */ ? DRM_PRIME_CAP_IMPORT : 0;
+		req->value |= false /* XXXKIB dev->driver->prime_handle_to_fd */ ? DRM_PRIME_CAP_EXPORT : 0;
+		break;
+	case DRM_CAP_TIMESTAMP_MONOTONIC:
+		req->value = drm_timestamp_monotonic;
+		break;
 	default:
-		return EINVAL;
+		return -EINVAL;
 	}
 	return 0;
 }
 
-
-#define DRM_IF_MAJOR	1
-#define DRM_IF_MINOR	2
-
-int drm_setversion(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_set_version *sv = data;
-	struct drm_set_version ver;
-	int if_version;
+	int if_version, retcode = 0;
 
-	/* Save the incoming data, and set the response before continuing
-	 * any further.
-	 */
-	ver = *sv;
-	sv->drm_di_major = DRM_IF_MAJOR;
-	sv->drm_di_minor = DRM_IF_MINOR;
-	sv->drm_dd_major = dev->driver->major;
-	sv->drm_dd_minor = dev->driver->minor;
-
-	DRM_DEBUG("ver.drm_di_major %d ver.drm_di_minor %d "
-	    "ver.drm_dd_major %d ver.drm_dd_minor %d\n",
-	    ver.drm_di_major, ver.drm_di_minor, ver.drm_dd_major,
-	    ver.drm_dd_minor);
-	DRM_DEBUG("sv->drm_di_major %d sv->drm_di_minor %d "
-	    "sv->drm_dd_major %d sv->drm_dd_minor %d\n",
-	    sv->drm_di_major, sv->drm_di_minor, sv->drm_dd_major,
-	    sv->drm_dd_minor);
-
-	if (ver.drm_di_major != -1) {
-		if (ver.drm_di_major != DRM_IF_MAJOR ||
-		    ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
-			return EINVAL;
+	if (sv->drm_di_major != -1) {
+		if (sv->drm_di_major != DRM_IF_MAJOR ||
+		    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+			retcode = -EINVAL;
+			goto done;
 		}
-		if_version = DRM_IF_VERSION(ver.drm_di_major,
-		    ver.drm_dd_minor);
-		dev->if_version = DRM_MAX(if_version, dev->if_version);
-		if (ver.drm_di_minor >= 1) {
+		if_version = DRM_IF_VERSION(sv->drm_di_major,
+					    sv->drm_di_minor);
+		dev->if_version = max(if_version, dev->if_version);
+		if (sv->drm_di_minor >= 1) {
 			/*
 			 * Version 1.1 includes tying of DRM to specific device
+			 * Version 1.4 has proper PCI domain support
 			 */
-			drm_set_busid(dev);
+			retcode = drm_set_busid(dev, file_priv);
+			if (retcode)
+				goto done;
 		}
 	}
 
-	if (ver.drm_dd_major != -1) {
-		if (ver.drm_dd_major != dev->driver->major ||
-		    ver.drm_dd_minor < 0 ||
-		    ver.drm_dd_minor > dev->driver->minor)
-		{
-			return EINVAL;
+	if (sv->drm_dd_major != -1) {
+		if (sv->drm_dd_major != dev->driver->major ||
+		    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+		    dev->driver->minor) {
+			retcode = -EINVAL;
+			goto done;
 		}
+
+		if (dev->driver->set_version)
+			dev->driver->set_version(dev, sv);
 	}
 
-	return 0;
+done:
+	sv->drm_di_major = DRM_IF_MAJOR;
+	sv->drm_di_minor = DRM_IF_MINOR;
+	sv->drm_dd_major = dev->driver->major;
+	sv->drm_dd_minor = dev->driver->minor;
+
+	return retcode;
 }
 
-
-int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/** No-op ioctl. */
+int drm_noop(struct drm_device *dev, void *data,
+	     struct drm_file *file_priv)
 {
 	DRM_DEBUG("\n");
 	return 0;
 }
+EXPORT_SYMBOL(drm_noop);

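drm_getcap() above answers capability queries, including the new DRM_CAP_PRIME and DRM_CAP_TIMESTAMP_MONOTONIC cases; from userland each query is a single ioctl. A sketch, assuming the stock drm.h struct drm_get_cap and DRM_IOCTL_GET_CAP definitions (the header path may differ per system):

#include <stdio.h>
#include <sys/ioctl.h>
#include <libdrm/drm.h>

/* Ask whether vblank timestamps use the monotonic clock. */
static int
query_monotonic(int fd)
{
	struct drm_get_cap cap = {
		.capability = DRM_CAP_TIMESTAMP_MONOTONIC,
	};

	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == -1)
		return (-1);	/* e.g. the kernel's -EINVAL for unknown caps */
	printf("monotonic vblank timestamps: %s\n",
	    cap.value ? "yes" : "no");
	return (0);
}
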
Modified: trunk/sys/dev/drm2/drm_irq.c
===================================================================
--- trunk/sys/dev/drm2/drm_irq.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_irq.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,6 +1,17 @@
 /* $MidnightBSD$ */
-/*-
- * Copyright 2003 Eric Anholt
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith at valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -17,28 +28,17 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <anholt at FreeBSD.org>
- *
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_irq.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_irq.c 314667 2017-03-04 13:03:31Z avg $");
 
-/** @file drm_irq.c
- * Support code for handling setup/teardown of interrupt handlers and
- * handing interrupt handlers off to the drivers.
- */
-
 #include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
 
-MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");
-
 /* Access macro for slots in vblank timestamp ringbuffer. */
 #define vblanktimestamp(dev, crtc, count) ( \
 	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
@@ -54,181 +54,33 @@
  */
 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device this DRM instance is attached to.
+ */
 int drm_irq_by_busid(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
-	struct drm_irq_busid *irq = data;
+	struct drm_irq_busid *p = data;
 
-	if ((irq->busnum >> 8) != dev->pci_domain ||
-	    (irq->busnum & 0xff) != dev->pci_bus ||
-	    irq->devnum != dev->pci_slot ||
-	    irq->funcnum != dev->pci_func)
-		return EINVAL;
+	if (!dev->driver->bus->irq_by_busid)
+		return -EINVAL;
 
-	irq->irq = dev->irq;
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
 
-	DRM_DEBUG("%d:%d:%d => IRQ %d\n",
-	    irq->busnum, irq->devnum, irq->funcnum, irq->irq);
-
-	return 0;
+	return dev->driver->bus->irq_by_busid(dev, p);
 }
 
-static void
-drm_irq_handler_wrap(void *arg)
-{
-	struct drm_device *dev = arg;
-
-	mtx_lock(&dev->irq_lock);
-	dev->driver->irq_handler(arg);
-	mtx_unlock(&dev->irq_lock);
-}
-
-int
-drm_irq_install(struct drm_device *dev)
-{
-	int retcode;
-
-	if (dev->irq == 0 || dev->dev_private == NULL)
-		return (EINVAL);
-
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	DRM_LOCK(dev);
-	if (dev->irq_enabled) {
-		DRM_UNLOCK(dev);
-		return EBUSY;
-	}
-	dev->irq_enabled = 1;
-
-	dev->context_flag = 0;
-
-	/* Before installing handler */
-	if (dev->driver->irq_preinstall)
-		dev->driver->irq_preinstall(dev);
-	DRM_UNLOCK(dev);
-
-	/* Install handler */
-	retcode = bus_setup_intr(dev->device, dev->irqr,
-	    INTR_TYPE_TTY | INTR_MPSAFE, NULL,
-	    (dev->driver->driver_features & DRIVER_LOCKLESS_IRQ) != 0 ?
-		drm_irq_handler_wrap : dev->driver->irq_handler,
-	    dev, &dev->irqh);
-	if (retcode != 0)
-		goto err;
-
-	/* After installing handler */
-	DRM_LOCK(dev);
-	if (dev->driver->irq_postinstall)
-		dev->driver->irq_postinstall(dev);
-	DRM_UNLOCK(dev);
-
-	return (0);
-err:
-	device_printf(dev->device, "Error setting interrupt: %d\n", retcode);
-	dev->irq_enabled = 0;
-
-	return (retcode);
-}
-
-int drm_irq_uninstall(struct drm_device *dev)
-{
-	int i;
-
-	if (!dev->irq_enabled)
-		return EINVAL;
-
-	dev->irq_enabled = 0;
-
-	/*
-	* Wake up any waiters so they don't hang.
-	*/
-	if (dev->num_crtcs) {
-		mtx_lock(&dev->vbl_lock);
-		for (i = 0; i < dev->num_crtcs; i++) {
-			wakeup(&dev->_vblank_count[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
-				dev->driver->get_vblank_counter(dev, i);
-		}
-		mtx_unlock(&dev->vbl_lock);
-	}
-
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	if (dev->driver->irq_uninstall)
-		dev->driver->irq_uninstall(dev);
-
-	DRM_UNLOCK(dev);
-	bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
-	DRM_LOCK(dev);
-
-	return 0;
-}
-
-int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_control *ctl = data;
-	int err;
-
-	switch (ctl->func) {
-	case DRM_INST_HANDLER:
-		/* Handle drivers whose DRM used to require IRQ setup but the
-		 * no longer does.
-		 */
-		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-			return 0;
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
-			return 0;
-		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
-			return EINVAL;
-		return drm_irq_install(dev);
-	case DRM_UNINST_HANDLER:
-		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-			return 0;
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
-			return 0;
-		DRM_LOCK(dev);
-		err = drm_irq_uninstall(dev);
-		DRM_UNLOCK(dev);
-		return err;
-	default:
-		return EINVAL;
-	}
-}
-
-#define NSEC_PER_USEC	1000L
-#define NSEC_PER_SEC	1000000000L
-
-int64_t
-timeval_to_ns(const struct timeval *tv)
-{
-	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
-		tv->tv_usec * NSEC_PER_USEC;
-}
-
-struct timeval
-ns_to_timeval(const int64_t nsec)
-{
-        struct timeval tv;
-	uint32_t rem;
-
-	if (nsec == 0) {
-		tv.tv_sec = 0;
-		tv.tv_usec = 0;
-		return (tv);
-	}
-
-        tv.tv_sec = nsec / NSEC_PER_SEC;
-	rem = nsec % NSEC_PER_SEC;
-        if (rem < 0) {
-                tv.tv_sec--;
-                rem += NSEC_PER_SEC;
-        }
-	tv.tv_usec = rem / 1000;
-        return (tv);
-}
-
 /*
  * Clear vblank timestamp buffer for a crtc.
  */
@@ -238,13 +90,6 @@
 		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
 }
 
-static int64_t
-abs64(int64_t x)
-{
-
-	return (x < 0 ? -x : x);
-}
-
 /*
  * Disable vblank irq's on crtc, make sure that last vblank count
  * of hardware and corresponding consistent software vblank counter
@@ -254,9 +99,10 @@
 static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
-	int64_t diff_ns;
+	s64 diff_ns;
 	int vblrc;
 	struct timeval tvblank;
+	int count = DRM_TIMESTAMP_MAXRETRIES;
 
 	/* Prevent vblank irq processing while disabling vblank irqs,
 	 * so no updates of timestamps or count can happen after we've
@@ -282,8 +128,11 @@
 	do {
 		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
 
+	if (!count)
+		vblrc = 0;
+
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 */
@@ -305,6 +154,7 @@
 	 */
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
@@ -313,7 +163,7 @@
 	mtx_unlock(&dev->vblank_time_lock);
 }
 
-static void vblank_disable_fn(void * arg)
+static void vblank_disable_fn(void *arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	int i;
@@ -350,35 +200,58 @@
 	free(dev->vblank_inmodeset, DRM_MEM_VBLANK);
 	free(dev->_vblank_time, DRM_MEM_VBLANK);
 
+	mtx_destroy(&dev->vbl_lock);
+	mtx_destroy(&dev->vblank_time_lock);
+
 	dev->num_crtcs = 0;
 }
+EXPORT_SYMBOL(drm_vblank_cleanup);
 
 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 {
-	int i;
+	int i, ret = -ENOMEM;
 
-	callout_init(&dev->vblank_disable_callout, CALLOUT_MPSAFE);
-#if 0
+	callout_init(&dev->vblank_disable_callout, 1);
 	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
-#endif
 	mtx_init(&dev->vblank_time_lock, "drmvtl", NULL, MTX_DEF);
 
 	dev->num_crtcs = num_crtcs;
 
 	dev->_vblank_count = malloc(sizeof(atomic_t) * num_crtcs,
-	    DRM_MEM_VBLANK, M_WAITOK);
+	    DRM_MEM_VBLANK, M_NOWAIT);
+	if (!dev->_vblank_count)
+		goto err;
+
 	dev->vblank_refcount = malloc(sizeof(atomic_t) * num_crtcs,
-	    DRM_MEM_VBLANK, M_WAITOK);
+	    DRM_MEM_VBLANK, M_NOWAIT);
+	if (!dev->vblank_refcount)
+		goto err;
+
 	dev->vblank_enabled = malloc(num_crtcs * sizeof(int),
-	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	    DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (!dev->vblank_enabled)
+		goto err;
+
 	dev->last_vblank = malloc(num_crtcs * sizeof(u32),
-	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	    DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (!dev->last_vblank)
+		goto err;
+
 	dev->last_vblank_wait = malloc(num_crtcs * sizeof(u32),
-	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	    DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (!dev->last_vblank_wait)
+		goto err;
+
 	dev->vblank_inmodeset = malloc(num_crtcs * sizeof(int),
-	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	    DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (!dev->vblank_inmodeset)
+		goto err;
+
 	dev->_vblank_time = malloc(num_crtcs * DRM_VBLANKTIME_RBSIZE *
-	    sizeof(struct timeval), DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	    sizeof(struct timeval), DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (!dev->_vblank_time)
+		goto err;
+
 	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
 
 	/* Driver specific high-precision vblank timestamping supported? */
@@ -395,16 +268,203 @@
 
 	dev->vblank_disable_allowed = 0;
 	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
 }
+EXPORT_SYMBOL(drm_vblank_init);
 
-void
-drm_calc_timestamping_constants(struct drm_crtc *crtc)
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c irq_preinstall() and \c irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
 {
-	int64_t linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
-	uint64_t dotclock;
+	int ret;
+	unsigned long sh_flags = 0;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	if (drm_dev_to_irq(dev) == 0)
+		return -EINVAL;
+
+	DRM_LOCK(dev);
+
+	/* Driver must have been initialized */
+	if (!dev->dev_private) {
+		DRM_UNLOCK(dev);
+		return -EINVAL;
+	}
+
+	if (dev->irq_enabled) {
+		DRM_UNLOCK(dev);
+		return -EBUSY;
+	}
+	dev->irq_enabled = 1;
+	DRM_UNLOCK(dev);
+
+	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+	/* Before installing handler */
+	if (dev->driver->irq_preinstall)
+		dev->driver->irq_preinstall(dev);
+
+	/* Install handler */
+	sh_flags = INTR_TYPE_TTY | INTR_MPSAFE;
+	if (!drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+		/*
+		 * FIXME Linux<->FreeBSD: This seems to make
+		 * bus_setup_intr() unhappy: it was reported to return
+		 * EINVAL on an i915 board (8086:2592 in a Thinkpad
+		 * X41).
+		 *
+		 * For now, no driver we have use that.
+		 * For now, no driver we have uses that.
+		sh_flags |= INTR_EXCL;
+
+	ret = -bus_setup_intr(dev->dev, dev->irqr, sh_flags, NULL,
+	    dev->driver->irq_handler, dev, &dev->irqh);
+
+	if (ret < 0) {
+		device_printf(dev->dev, "Error setting interrupt: %d\n", -ret);
+		DRM_LOCK(dev);
+		dev->irq_enabled = 0;
+		DRM_UNLOCK(dev);
+		return ret;
+	}
+
+	/* After installing handler */
+	if (dev->driver->irq_postinstall)
+		ret = dev->driver->irq_postinstall(dev);
+
+	if (ret < 0) {
+		DRM_LOCK(dev);
+		dev->irq_enabled = 0;
+		DRM_UNLOCK(dev);
+		bus_teardown_intr(dev->dev, dev->irqr, dev->irqh);
+		dev->driver->bus->free_irq(dev);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device *dev)
+{
+	int irq_enabled, i;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	DRM_LOCK(dev);
+	irq_enabled = dev->irq_enabled;
+	dev->irq_enabled = 0;
+	DRM_UNLOCK(dev);
+
+	/*
+	 * Wake up any waiters so they don't hang.
+	 */
+	if (dev->num_crtcs) {
+		mtx_lock(&dev->vbl_lock);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			DRM_WAKEUP(&dev->_vblank_count[i]);
+			dev->vblank_enabled[i] = 0;
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+		}
+		mtx_unlock(&dev->vbl_lock);
+	}
+
+	if (!irq_enabled)
+		return -EINVAL;
+
+	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+	if (dev->driver->irq_uninstall)
+		dev->driver->irq_uninstall(dev);
+
+	bus_teardown_intr(dev->dev, dev->irqr, dev->irqh);
+	dev->driver->bus->free_irq(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_control *ctl = data;
+
+	/* If we don't have an IRQ, fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h.
+	 */
+
+
+	switch (ctl->func) {
+	case DRM_INST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			return 0;
+		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+		    ctl->irq != drm_dev_to_irq(dev))
+			return -EINVAL;
+		return drm_irq_install(dev);
+	case DRM_UNINST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			return 0;
+		return drm_irq_uninstall(dev);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
 	/* Dot clock in Hz: */
-	dotclock = (uint64_t) crtc->hwmode.clock * 1000;
+	dotclock = (u64) crtc->hwmode.clock * 1000;
 
 	/* Fields of interlaced scanout modes are only half a frame duration.
 	 * Double the dotclock to get half the frame-/line-/pixel durations.
@@ -418,10 +478,10 @@
 		 * line duration, frame duration and pixel duration in
 		 * nanoseconds:
 		 */
-		pixeldur_ns = (int64_t)1000000000 / dotclock;
-		linedur_ns  = ((uint64_t)crtc->hwmode.crtc_htotal *
-		    1000000000) / dotclock;
-		framedur_ns = (int64_t)crtc->hwmode.crtc_vtotal * linedur_ns;
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					      1000000000), dotclock);
+		framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
 	} else
 		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
 			  crtc->base.id);
@@ -437,6 +497,7 @@
 		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
 		  (int) linedur_ns, (int) pixeldur_ns);
 }
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
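A worked example of the constants above, using illustrative CEA 1080p60 timings (not values taken from this patch): hwmode.clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal = 1125.

	/* dotclock    = 148500 * 1000          = 148,500,000 Hz
	 * pixeldur_ns = 1e9 / 148500000        ~ 6 ns
	 * linedur_ns  = 2200 * 1e9 / 148500000 ~ 14,814 ns
	 * framedur_ns = 1125 * 14814           ~ 16,665,750 ns (~16.7 ms)
	 * => one frame every ~16.7 ms, i.e. the expected ~60 Hz refresh.
	 */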
 
 /**
  * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
@@ -482,16 +543,17 @@
  * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
  *
  */
-int
-drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
-    int *max_error, struct timeval *vblank_time, unsigned flags,
-    struct drm_crtc *refcrtc)
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
 {
 	struct timeval stime, raw_time;
 	struct drm_display_mode *mode;
 	int vbl_status, vtotal, vdisplay;
 	int vpos, hpos, i;
-	int64_t framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
 	bool invbl;
 
 	if (crtc < 0 || crtc >= dev->num_crtcs) {
@@ -531,7 +593,7 @@
 	 */
 	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
 		/* Disable preemption to make it very likely to
-		 * succeed in the first iteration.
+		 * succeed in the first iteration even on PREEMPT_RT kernel.
 		 */
 		critical_enter();
 
@@ -543,6 +605,10 @@
 
 		/* Get system timestamp after query. */
 		getmicrouptime(&raw_time);
+#ifdef FREEBSD_NOTYET
+		if (!drm_timestamp_monotonic)
+			mono_time_offset = ktime_get_monotonic_offset();
+#endif /* FREEBSD_NOTYET */
 
 		critical_exit();
 
@@ -556,7 +622,7 @@
 		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
-		if (duration_ns <= (int64_t) *max_error)
+		if (duration_ns <= (s64) *max_error)
 			break;
 	}
 
@@ -580,7 +646,7 @@
 	 * since start of scanout at first display scanline. delta_ns
 	 * can be negative if start of scanout hasn't happened yet.
 	 */
-	delta_ns = (int64_t)vpos * linedur_ns + (int64_t)hpos * pixeldur_ns;
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
 
 	/* Is vpos outside nominal vblank area, but less than
 	 * 1/100 of a frame height away from start of vblank?
@@ -600,6 +666,13 @@
 		vbl_status |= 0x8;
 	}
 
+#ifdef FREEBSD_NOTYET
+	if (!drm_timestamp_monotonic)
+		etime = ktime_sub(etime, mono_time_offset);
+
+	/* save this only for debugging purposes */
+	tv_etime = ktime_to_timeval(etime);
+#endif /* FREEBSD_NOTYET */
 	/* Subtract time delta from raw timestamp to get final
 	 * vblank_time timestamp for end of vblank.
 	 */
@@ -616,7 +689,21 @@
 
 	return vbl_status;
 }
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
+static struct timeval get_drm_timestamp(void)
+{
+	struct timeval now;
+
+	microtime(&now);
+#ifdef FREEBSD_NOTYET
+	if (!drm_timestamp_monotonic)
+		now = ktime_sub(now, ktime_get_monotonic_offset());
+#endif /* defined(FREEBSD_NOTYET) */
+
+	return now;
+}
+
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  * vblank interval.
@@ -640,7 +727,7 @@
 u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 			      struct timeval *tvblank, unsigned flags)
 {
-	int ret = 0;
+	int ret;
 
 	/* Define requested maximum error on timestamps (nanoseconds). */
 	int max_error = (int) drm_timestamp_precision * 1000;
@@ -654,12 +741,13 @@
 	}
 
 	/* GPU high precision timestamp query unsupported or failed.
-	 * Return gettimeofday timestamp as best estimate.
+	 * Return current monotonic/gettimeofday timestamp as best estimate.
 	 */
-	microtime(tvblank);
+	*tvblank = get_drm_timestamp();
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
 
 /**
  * drm_vblank_count - retrieve "cooked" vblank counter value
@@ -674,6 +762,7 @@
 {
 	return atomic_read(&dev->_vblank_count[crtc]);
 }
+EXPORT_SYMBOL(drm_vblank_count);
 
 /**
  * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -702,13 +791,55 @@
 	do {
 		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
-		rmb();
+		smp_rmb();
 	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
 
 	return cur_vblank;
 }
+EXPORT_SYMBOL(drm_vblank_count_and_time);
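The loop above is a lock-free consistent read: sample the counter, copy the timestamp, re-check the counter, and retry if an interrupt advanced it in between. A stripped-down sketch of the same pattern, with hypothetical counter/payload names:

	do {
		seq  = atomic_read(&counter);	/* sample */
		copy = payload;			/* may race with the writer */
		smp_rmb();			/* order the re-check after the copy */
	} while (seq != atomic_read(&counter));	/* retry if it moved */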
 
+static void send_vblank_event(struct drm_device *dev,
+		struct drm_pending_vblank_event *e,
+		unsigned long seq, struct timeval *now)
+{
+	WARN_ON_SMP(!mtx_owned(&dev->event_lock));
+	e->event.sequence = seq;
+	e->event.tv_sec = now->tv_sec;
+	e->event.tv_usec = now->tv_usec;
+
+	list_add_tail(&e->base.link,
+		      &e->base.file_priv->event_list);
+	drm_event_wakeup(&e->base);
+	CTR3(KTR_DRM, "vblank_event_delivered %d %d %d",
+	    e->base.pid, e->pipe, e->event.sequence);
+}
+
 /**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+		struct drm_pending_vblank_event *e)
+{
+	struct timeval now;
+	unsigned int seq;
+	if (crtc >= 0) {
+		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	} else {
+		seq = 0;
+
+		now = get_drm_timestamp();
+	}
+	send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
+
+/**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
  * @crtc: counter to update
@@ -767,7 +898,9 @@
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -787,7 +920,7 @@
 
 	mtx_lock(&dev->vbl_lock);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], 1) == 0) {
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
 		mtx_lock(&dev->vblank_time_lock);
 		if (!dev->vblank_enabled[crtc]) {
 			/* Enable vblank irqs under vblank_time_lock protection.
@@ -796,7 +929,7 @@
 			 * timestamps. Filtercode in drm_handle_vblank() will
 			 * prevent double-accounting of same vblank interval.
 			 */
-			ret = -dev->driver->enable_vblank(dev, crtc);
+			ret = dev->driver->enable_vblank(dev, crtc);
 			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
 				  crtc, ret);
 			if (ret)
@@ -810,7 +943,7 @@
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
-			ret = EINVAL;
+			ret = -EINVAL;
 		}
 	}
 	mtx_unlock(&dev->vbl_lock);
@@ -817,6 +950,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_vblank_get);
 
 /**
  * drm_vblank_put - give up ownership of vblank events
@@ -828,17 +962,24 @@
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	KASSERT(atomic_read(&dev->vblank_refcount[crtc]) != 0,
-	    ("Too many drm_vblank_put for crtc %d", crtc));
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], -1) == 1 &&
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
 	    (drm_vblank_offdelay > 0))
 		callout_reset(&dev->vblank_disable_callout,
 		    (drm_vblank_offdelay * DRM_HZ) / 1000,
 		    vblank_disable_fn, dev);
 }
+EXPORT_SYMBOL(drm_vblank_put);
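A usage sketch of the refcount contract, with a hypothetical caller: every successful drm_vblank_get() must be paired with a drm_vblank_put(), and the last put is what arms the delayed-disable callout above.

	static int foo_wait_vblank(struct drm_device *dev, int crtc)
	{
		int ret;

		ret = drm_vblank_get(dev, crtc);	/* 0->1 enables the interrupt */
		if (ret)
			return ret;
		/* ... sleep until drm_vblank_count(dev, crtc) advances ... */
		drm_vblank_put(dev, crtc);	/* last put schedules the disable */
		return 0;
	}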
 
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
@@ -847,11 +988,12 @@
 
 	mtx_lock(&dev->vbl_lock);
 	vblank_disable_and_save(dev, crtc);
-	mtx_lock(&dev->event_lock);
-	wakeup(&dev->_vblank_count[crtc]);
+	DRM_WAKEUP(&dev->_vblank_count[crtc]);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	mtx_lock(&dev->event_lock);
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
 		if (e->pipe != crtc)
 			continue;
@@ -858,26 +1000,20 @@
 		DRM_DEBUG("Sending premature vblank event on disable: \
 			  wanted %d, current %d\n",
 			  e->event.sequence, seq);
-
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		list_del(&e->base.link);
 		drm_vblank_put(dev, e->pipe);
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		drm_event_wakeup(&e->base);
-		CTR3(KTR_DRM, "vblank_event_delivered %d %d %d",
-		    e->base.pid, e->pipe, e->event.sequence);
+		send_vblank_event(dev, e, seq, &now);
 	}
+	mtx_unlock(&dev->event_lock);
 
-	mtx_unlock(&dev->event_lock);
 	mtx_unlock(&dev->vbl_lock);
 }
+EXPORT_SYMBOL(drm_vblank_off);
 
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
  * @crtc: CRTC in question
- * @post: post or pre mode set?
  *
  * Account for vblank events across mode setting events, which will likely
  * reset the hardware frame counter.
@@ -884,7 +1020,7 @@
  */
 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 {
-	/* vblank is not initialized (IRQ not installed ?) */
+	/* vblank is not initialized (IRQ not installed ?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 	/*
@@ -900,9 +1036,13 @@
 			dev->vblank_inmodeset[crtc] |= 0x2;
 	}
 }
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
 
 void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
 {
+	/* vblank is not initialized (IRQ not installed ?), or has been freed */
+	if (!dev->num_crtcs)
+		return;
 
 	if (dev->vblank_inmodeset[crtc]) {
 		mtx_lock(&dev->vbl_lock);
@@ -915,6 +1055,7 @@
 		dev->vblank_inmodeset[crtc] = 0;
 	}
 }
+EXPORT_SYMBOL(drm_vblank_post_modeset);
 
 /**
  * drm_modeset_ctl - handle vblank event counter changes across mode switch
@@ -931,18 +1072,19 @@
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int ret = 0;
 	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
-		goto out;
+		return 0;
 
+	/* KMS drivers handle this internally */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	crtc = modeset->crtc;
-	if (crtc >= dev->num_crtcs) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (crtc >= dev->num_crtcs)
+		return -EINVAL;
 
 	switch (modeset->cmd) {
 	case _DRM_PRE_MODESET:
@@ -952,12 +1094,10 @@
 		drm_vblank_post_modeset(dev, crtc);
 		break;
 	default:
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
 
-out:
-	return ret;
+	return 0;
 }
 
 static void
@@ -976,7 +1116,11 @@
 	unsigned int seq;
 	int ret;
 
-	e = malloc(sizeof *e, DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+	e = malloc(sizeof *e, DRM_MEM_VBLANK, M_NOWAIT | M_ZERO);
+	if (e == NULL) {
+		ret = -ENOMEM;
+		goto err_put;
+	}
 
 	e->pipe = pipe;
 	e->base.pid = curproc->p_pid;
@@ -990,7 +1134,7 @@
 	mtx_lock(&dev->event_lock);
 
 	if (file_priv->event_space < sizeof e->event) {
-		ret = EBUSY;
+		ret = -EBUSY;
 		goto err_unlock;
 	}
 
@@ -1011,15 +1155,9 @@
 
 	e->event.sequence = vblwait->request.sequence;
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
 		drm_vblank_put(dev, pipe);
-		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-		drm_event_wakeup(&e->base);
+		send_vblank_event(dev, e, seq, &now);
 		vblwait->reply.sequence = seq;
-		CTR3(KTR_DRM, "vblank_event_wakeup p1 %d %d %d", curproc->p_pid,
-		    pipe, vblwait->request.sequence);
 	} else {
 		/* drm_handle_vblank_events will call drm_vblank_put */
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1033,6 +1171,7 @@
 err_unlock:
 	mtx_unlock(&dev->event_lock);
 	free(e, DRM_MEM_VBLANK);
+err_put:
 	drm_vblank_put(dev, pipe);
 	return ret;
 }
@@ -1055,14 +1194,14 @@
 		    struct drm_file *file_priv)
 {
 	union drm_wait_vblank *vblwait = data;
-	int ret = 0;
+	int ret;
 	unsigned int flags, seq, crtc, high_crtc;
 
 	if (/*(!drm_dev_to_irq(dev)) || */(!dev->irq_enabled))
-		return (EINVAL);
+		return -EINVAL;
 
 	if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
-		return (EINVAL);
+		return -EINVAL;
 
 	if (vblwait->request.type &
 	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
@@ -1071,7 +1210,7 @@
 			  vblwait->request.type,
 			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
 			   _DRM_VBLANK_HIGH_CRTC_MASK));
-		return (EINVAL);
+		return -EINVAL;
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
@@ -1081,12 +1220,12 @@
 	else
 		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
 	if (crtc >= dev->num_crtcs)
-		return (EINVAL);
+		return -EINVAL;
 
 	ret = drm_vblank_get(dev, crtc);
 	if (ret) {
 		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
-		return (ret);
+		return ret;
 	}
 	seq = drm_vblank_count(dev, crtc);
 
@@ -1097,7 +1236,7 @@
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
 	default:
-		ret = (EINVAL);
+		ret = -EINVAL;
 		goto done;
 	}
 
@@ -1113,6 +1252,8 @@
 		vblwait->request.sequence = seq + 1;
 	}
 
+	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+		  vblwait->request.sequence, crtc);
 	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
 	mtx_lock(&dev->vblank_time_lock);
 	while (((drm_vblank_count(dev, crtc) - vblwait->request.sequence) >
@@ -1125,13 +1266,15 @@
 		 * application when crtc is disabled or irq
 		 * uninstalled anyway.
 		 */
-		ret = msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock,
+		ret = -msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock,
 		    PCATCH, "drmvbl", 3 * hz);
+		if (ret == -ERESTART)
+			ret = -ERESTARTSYS;
 		if (ret != 0)
 			break;
 	}
 	mtx_unlock(&dev->vblank_time_lock);
-	if (ret != EINTR) {
+	if (ret != -EINTR) {
 		struct timeval now;
 		long reply_seq;
 
@@ -1139,13 +1282,19 @@
 		CTR5(KTR_DRM, "wait_vblank %d %d rt %x success %d %d",
 		    curproc->p_pid, crtc, vblwait->request.type,
 		    vblwait->request.sequence, reply_seq);
+
 		vblwait->reply.sequence = reply_seq;
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
+
+		DRM_DEBUG("returning %d to client\n",
+			  vblwait->reply.sequence);
 	} else {
 		CTR5(KTR_DRM, "wait_vblank %d %d rt %x error %d %d",
 		    curproc->p_pid, crtc, vblwait->request.type, ret,
 		    vblwait->request.sequence);
+
+		DRM_DEBUG("vblank wait interrupted by signal\n");
 	}
 
 done:
@@ -1153,7 +1302,7 @@
 	return ret;
 }
 
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
@@ -1160,7 +1309,6 @@
 	unsigned int seq;
 
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
-	CTR2(KTR_DRM, "drm_handle_vblank_events %d %d", seq, crtc);
 
 	mtx_lock(&dev->event_lock);
 
@@ -1170,17 +1318,17 @@
 		if ((seq - e->event.sequence) > (1<<23))
 			continue;
 
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		DRM_DEBUG("vblank event on %d, current %d\n",
+			  e->event.sequence, seq);
+
+		list_del(&e->base.link);
 		drm_vblank_put(dev, e->pipe);
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		drm_event_wakeup(&e->base);
-		CTR3(KTR_DRM, "vblank_event_wakeup p2 %d %d %d", e->base.pid,
-		    e->pipe, e->event.sequence);
+		send_vblank_event(dev, e, seq, &now);
 	}
 
 	mtx_unlock(&dev->event_lock);
+
+	CTR2(KTR_DRM, "drm_handle_vblank_events %d %d", seq, crtc);
 }
 
 /**
@@ -1194,7 +1342,7 @@
 bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
-	int64_t diff_ns;
+	s64 diff_ns;
 	struct timeval tvblank;
 
 	if (!dev->num_crtcs)
@@ -1240,15 +1388,18 @@
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
 	}
 
-	wakeup(&dev->_vblank_count[crtc]);
+	DRM_WAKEUP(&dev->_vblank_count[crtc]);
 	drm_handle_vblank_events(dev, crtc);
 
 	mtx_unlock(&dev->vblank_time_lock);
 	return true;
 }
+EXPORT_SYMBOL(drm_handle_vblank);
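
Several paths in this file decide "has the requested vblank passed?" with unsigned wraparound arithmetic, e.g. (seq - e->event.sequence) > (1<<23), rather than a plain compare. A sketch with hypothetical values:

	/* With u32 sequences, (a - b) <= (1<<23) means "b is not ahead of a",
	 * even across wraparound:
	 *   a = 5,  b = 0xfffffffb:  a - b = 10          -> b already passed
	 *   a = 5,  b = 20:          a - b = 0xfffffff1  -> b still pending
	 */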

Modified: trunk/sys/dev/drm2/drm_linux_list.h
===================================================================
--- trunk/sys/dev/drm2/drm_linux_list.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_linux_list.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_linux_list.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_linux_list.h 254841 2013-08-25 10:28:02Z dumbbell $");
 
 #ifndef _DRM_LINUX_LIST_H_
 #define _DRM_LINUX_LIST_H_
@@ -145,6 +145,11 @@
 	    &pos->member != (head);					\
 	    pos = n, n = list_entry(n->member.next, __typeof(*n), member))
 
+#define list_for_each_entry_safe_from(pos, n, head, member) 			\
+	for (n = list_entry(pos->member.next, __typeof(*pos), member);		\
+	     &pos->member != (head);						\
+	     pos = n, n = list_entry(n->member.next, __typeof(*n), member))
+
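A usage sketch for the new macro, with a hypothetical item type: unlike list_for_each_entry_safe(), iteration continues from an existing cursor pos (inclusive) instead of restarting at the head.

	struct item { int v; struct list_head link; };
	struct item *pos, *n;	/* pos already points into the list */

	list_for_each_entry_safe_from(pos, n, &head, link) {
		if (pos->v < 0)
			list_del(&pos->link);	/* safe: n was fetched first */
	}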
 #define list_first_entry(ptr, type, member) \
 	list_entry((ptr)->next, type, member)
 

Modified: trunk/sys/dev/drm2/drm_linux_list_sort.c
===================================================================
--- trunk/sys/dev/drm2/drm_linux_list_sort.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_linux_list_sort.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -29,7 +29,7 @@
  */
 
 #include <dev/drm2/drmP.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_linux_list_sort.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_linux_list_sort.c 258707 2013-11-28 10:04:53Z dumbbell $");
 
 struct drm_list_sort_thunk {
 	int (*cmp)(void *, struct list_head *, struct list_head *);
@@ -43,8 +43,8 @@
 	struct drm_list_sort_thunk *thunk;
 
 	thunk = priv;
-	le1 = __DECONST(struct list_head *, d1);
-	le2 = __DECONST(struct list_head *, d2);
+	le1 = *(__DECONST(struct list_head **, d1));
+	le2 = *(__DECONST(struct list_head **, d2));
 	return ((thunk->cmp)(thunk->priv, le1, le2));
 }
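The extra dereference matters because the sort runs over a flat array of node pointers, so the comparator's d1/d2 arguments are addresses of array slots (struct list_head **), not the nodes themselves. A sketch of the presumed surrounding call, FreeBSD qsort_r() style (the array name is illustrative):

	struct list_head **ar;	/* ar[i] holds one node of the list */
	/* ... collect the list into ar[0..count-1] ... */
	qsort_r(ar, count, sizeof(*ar), &thunk, cmp);	/* cmp = comparator above */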
 

Modified: trunk/sys/dev/drm2/drm_lock.c
===================================================================
--- trunk/sys/dev/drm2/drm_lock.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_lock.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,15 @@
 /* $MidnightBSD$ */
-/*-
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith at valinux.com
+ *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -22,141 +32,188 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_lock.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_lock.c 314593 2017-03-03 12:03:50Z avg $");
 
-/** @file drm_lock.c
- * Implementation of the ioctls and other support code for dealing with the
- * hardware lock.
+#include <dev/drm2/drmP.h>
+
+#if defined(__linux__)
+static int drm_notifier(void *priv);
+#endif
+
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
+/**
+ * Lock ioctl.
  *
- * The DRM hardware lock is a shared structure between the kernel and userland.
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
  *
- * On uncontended access where the new context was the last context, the
- * client may take the lock without dropping down into the kernel, using atomic
- * compare-and-set.
- *
- * If the client finds during compare-and-set that it was not the last owner
- * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
- * lock, and may have side-effects of kernel-managed context switching.
- *
- * When the client releases the lock, if the lock is marked as being contended
- * by another client, then the DRM unlock ioctl is called so that the
- * contending client may be woken up.
+ * Add the current task to the lock wait queue, and attempt to take the lock.
  */
-
-#include <dev/drm2/drmP.h>
-
 int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
+	++file_priv->lock_count;
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
-		    DRM_CURRENTPID, lock->context);
-		return EINVAL;
+			  DRM_CURRENTPID, lock->context);
+		return -EINVAL;
 	}
 
 	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
-	    lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
-	    lock->flags);
+		  lock->context, DRM_CURRENTPID,
+		  master->lock.hw_lock->lock, lock->flags);
 
-	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
-	    lock->context < 0)
-		return EINVAL;
+	mtx_lock(&master->lock.spinlock);
+	master->lock.user_waiters++;
+	mtx_unlock(&master->lock.spinlock);
 
-	DRM_LOCK(dev);
 	for (;;) {
-		if (drm_lock_take(&dev->lock, lock->context)) {
-			dev->lock.file_priv = file_priv;
-			dev->lock.lock_time = jiffies;
+#if defined(__linux__)
+		if (!master->lock.hw_lock) {
+			/* Device has been unregistered */
+			send_sig(SIGTERM, current, 0);
+			ret = -EINTR;
+			break;
+		}
+#endif
+		if (drm_lock_take(&master->lock, lock->context)) {
+			master->lock.file_priv = file_priv;
+			master->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-			break;  /* Got lock */
+			break;	/* Got lock */
 		}
 
 		/* Contention */
-		ret = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
+		DRM_UNLOCK_ASSERT(dev);
+		ret = -sx_sleep(&master->lock.lock_queue, &drm_global_mutex,
 		    PCATCH, "drmlk2", 0);
+		if (ret == -ERESTART)
+			ret = -ERESTARTSYS;
 		if (ret != 0)
 			break;
 	}
-	DRM_UNLOCK(dev);
+	mtx_lock(&master->lock.spinlock);
+	master->lock.user_waiters--;
+	mtx_unlock(&master->lock.spinlock);
 
-	if (ret == ERESTART)
-		DRM_DEBUG("restarting syscall\n");
-	else
-		DRM_DEBUG("%d %s\n", lock->context,
-		    ret ? "interrupted" : "has lock");
+	DRM_DEBUG("%d %s\n", lock->context,
+		  ret ? "interrupted" : "has lock");
+	if (ret) return ret;
 
-	if (ret != 0)
-		return ret;
+#if defined(__linux__)
+	/* Don't block all signals on the master process for now.
+	 * This is probably not the correct answer, but it lets us
+	 * debug the xkb X server for now. */
+	if (!file_priv->is_master) {
+		sigemptyset(&dev->sigmask);
+		sigaddset(&dev->sigmask, SIGSTOP);
+		sigaddset(&dev->sigmask, SIGTSTP);
+		sigaddset(&dev->sigmask, SIGTTIN);
+		sigaddset(&dev->sigmask, SIGTTOU);
+		dev->sigdata.context = lock->context;
+		dev->sigdata.lock = master->lock.hw_lock;
+		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	}
+#endif
 
-	/* XXX: Add signal blocking here */
+	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+	{
+		if (dev->driver->dma_quiescent(dev)) {
+			DRM_DEBUG("%d waiting for DMA quiescent\n",
+				  lock->context);
+			return -EBUSY;
+		}
+	}
 
-	if (dev->driver->dma_quiescent != NULL &&
-	    (lock->flags & _DRM_LOCK_QUIESCENT))
-		dev->driver->dma_quiescent(dev);
-
 	return 0;
 }
 
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
 
-	DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
-	    lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
-	    lock->flags);
-
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
-		    DRM_CURRENTPID, lock->context);
-		return EINVAL;
+			  DRM_CURRENTPID, lock->context);
+		return -EINVAL;
 	}
 
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
-	DRM_LOCK(dev);
-	drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
-
-	if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
-		DRM_ERROR("\n");
+	if (drm_lock_free(&master->lock, lock->context)) {
+		/* FIXME: Should really bail out here. */
 	}
-	DRM_UNLOCK(dev);
 
+#if defined(__linux__)
+	unblock_all_signals();
+#endif
 	return 0;
 }
 
-int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+		  unsigned int context)
 {
+	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-	unsigned int old, new;
 
+	mtx_lock(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
-		else
-			new = context | _DRM_LOCK_HELD;
-	} while (!atomic_cmpset_int(lock, old, new));
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+	mtx_unlock(&lock_data->spinlock);
 
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {
 			if (context != DRM_KERNEL_CONTEXT) {
 				DRM_ERROR("%d holds heavyweight lock\n",
-				    context);
+					  context);
 			}
 			return 0;
 		}
 	}
-	if (new == (context | _DRM_LOCK_HELD)) {
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
 		/* Have lock */
 		return 1;
 	}
@@ -163,38 +220,162 @@
 	return 0;
 }
 
-/* This takes a lock forcibly and hands it to context.	Should ONLY be used
-   inside *_unlock to give lock to kernel before calling *_dma_schedule. */
-int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
+/**
+ * This takes a lock forcibly and hands it to context.	Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+			     unsigned int context)
 {
+	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-	unsigned int old, new;
 
 	lock_data->file_priv = NULL;
 	do {
 		old = *lock;
 		new = context | _DRM_LOCK_HELD;
-	} while (!atomic_cmpset_int(lock, old, new));
-
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
 	return 1;
 }
 
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
 int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 {
+	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-	unsigned int old, new;
 
-	lock_data->file_priv = NULL;
+	mtx_lock(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		mtx_unlock(&lock_data->spinlock);
+		return 1;
+	}
+	mtx_unlock(&lock_data->spinlock);
+
 	do {
 		old = *lock;
-		new = 0;
-	} while (!atomic_cmpset_int(lock, old, new));
+		new = _DRM_LOCKING_CONTEXT(old);
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
 
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
-		    context, _DRM_LOCKING_CONTEXT(old));
+			  context, _DRM_LOCKING_CONTEXT(old));
 		return 1;
 	}
-	DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
+	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
+
+#if defined(__linux__)
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+	struct drm_sigdata *s = (struct drm_sigdata *) priv;
+	unsigned int old, new, prev;
+
+	/* Allow signal delivery if lock isn't held */
+	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+		return 1;
+
+	/* Otherwise, set the flag to force a call to drmUnlock */
+	do {
+		old = s->lock->lock;
+		new = old | _DRM_LOCK_CONT;
+		prev = cmpxchg(&s->lock->lock, old, new);
+	} while (prev != old);
+	return 0;
+}
+#endif
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+	int ret;
+
+	mtx_lock(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+
+		mtx_unlock(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		mtx_lock(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	mtx_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	mtx_lock(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	mtx_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_master *master = file_priv->master;
+	return (file_priv->lock_count && master->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+		master->lock.file_priv == file_priv);
+}
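A usage sketch of the idlelock pair introduced above: a hypothetical caller that needs the hardware to go idle without risking deadlock against a blocked lock holder.

	drm_idlelock_take(&master->lock);	/* queue as a kernel waiter */
	/* ... the hw lock is now ours, or is handed to us on the next unlock;
	 *     wait for / verify GPU idle here ... */
	drm_idlelock_release(&master->lock);	/* hand it back, wake waiters */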

Modified: trunk/sys/dev/drm2/drm_memory.c
===================================================================
--- trunk/sys/dev/drm2/drm_memory.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_memory.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,13 +1,19 @@
 /* $MidnightBSD$ */
-/*-
- *Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith at valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2011 The FreeBSD Foundation
- * All rights reserved.
+ * All Rights Reserved.
  *
- * Portions of this software were developed by Konstantin Belousov
- * under sponsorship from the FreeBSD Foundation.
- *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -26,103 +32,104 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith at valinux.com>
- *    Gareth Hughes <gareth at valinux.com>
- *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_memory.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_memory.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/** @file drm_memory.c
- * Wrappers for kernel memory allocation routines, and MTRR management support.
- *
- * This file previously implemented a memory consumption tracking system using
- * the "area" argument for various different types of allocations, but that
- * has been stripped out for now.
- */
-
 #include <dev/drm2/drmP.h>
 
-MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
-MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
-MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
-MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
-MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
-MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
-MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
-MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
-MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
-MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
-MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
-MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
-MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
-MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
-MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
-MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
-    "DRM CTXBITMAP Data Structures");
-MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
-MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
-MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
-MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
-MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
-
-void drm_mem_init(void)
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+		       struct drm_device * dev)
 {
+	/*
+	 * FIXME Linux<->FreeBSD: Not implemented. This is never called
+	 * on FreeBSD anyway, because drm_agp_mem->cant_use_aperture is
+	 * set to 0.
+	 */
+	return NULL;
 }
 
-void drm_mem_uninit(void)
+#define	vunmap(handle)
+
+/** Wrapper around agp_free_memory() */
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
+	device_t agpdev;
+
+	agpdev = agp_find_device();
+	if (!agpdev || !handle)
+		return;
+
+	agp_free_memory(agpdev, handle);
 }
+EXPORT_SYMBOL(drm_free_agp);
 
-void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
 {
-	return pmap_mapdev_attr(map->offset, map->size, VM_MEMATTR_WRITE_COMBINING);
+	device_t agpdev;
+
+	agpdev = agp_find_device();
+	if (!agpdev || !handle)
+		return -EINVAL;
+
+	return -agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
 }
 
-void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
-	return pmap_mapdev(map->offset, map->size);
+	device_t agpdev;
+
+	agpdev = agp_find_device();
+	if (!agpdev || !handle)
+		return -EINVAL;
+
+	return -agp_unbind_memory(agpdev, handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
-void drm_ioremapfree(drm_local_map_t *map)
+#else  /*  __OS_HAS_AGP  */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+			      struct drm_device * dev)
 {
-	pmap_unmapdev((vm_offset_t) map->virtual, map->size);
+	return NULL;
 }
 
-int
-drm_mtrr_add(unsigned long offset, size_t size, int flags)
+#endif				/* agp */
+
+void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
-	int act;
-	struct mem_range_desc mrdesc;
-
-	mrdesc.mr_base = offset;
-	mrdesc.mr_len = size;
-	mrdesc.mr_flags = flags;
-	act = MEMRANGE_SET_UPDATE;
-	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
-	return mem_range_attr_set(&mrdesc, &act);
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = pmap_mapdev(map->offset, map->size);
 }
+EXPORT_SYMBOL(drm_core_ioremap);
 
-int
-drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
+void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
 {
-	int act;
-	struct mem_range_desc mrdesc;
-
-	mrdesc.mr_base = offset;
-	mrdesc.mr_len = size;
-	mrdesc.mr_flags = flags;
-	act = MEMRANGE_SET_REMOVE;
-	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
-	return mem_range_attr_set(&mrdesc, &act);
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = pmap_mapdev_attr(map->offset, map->size,
+		    VM_MEMATTR_WRITE_COMBINING);
 }
+EXPORT_SYMBOL(drm_core_ioremap_wc);
 
-void
-drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
+void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 {
+	if (!map->handle || !map->size)
+		return;
 
-	pmap_invalidate_cache_pages(pages, num_pages);
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		vunmap(map->handle);
+	else
+		pmap_unmapdev((vm_offset_t)map->handle, map->size);
 }
+EXPORT_SYMBOL(drm_core_ioremapfree);
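A minimal lifecycle sketch for the new ioremap wrappers, assuming a drm_local_map populated the way other drm2 callers do (offset/size/type filled in first; bar_base and bar_len are hypothetical):

	struct drm_local_map map;

	map.offset = bar_base;
	map.size   = bar_len;
	map.type   = _DRM_REGISTERS;

	drm_core_ioremap(&map, dev);	/* fills in map.handle */
	if (map.handle != NULL) {
		/* ... access the device memory through map.handle ... */
		drm_core_ioremapfree(&map, dev);	/* pmap_unmapdev() for non-AGP */
	}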

Modified: trunk/sys/dev/drm2/drm_mm.c
===================================================================
--- trunk/sys/dev/drm2/drm_mm.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_mm.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -28,7 +28,7 @@
  **************************************************************************/
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_mm.c 249081 2013-04-04 05:39:37Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_mm.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 /*
  * Generic simple memory manager implementation. Intended to be used as a base
@@ -54,8 +54,7 @@
 {
 	struct drm_mm_node *child;
 
-	child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
-	    (atomic ? M_NOWAIT : M_WAITOK));
+	child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT | M_ZERO);
 
 	if (unlikely(child == NULL)) {
 		mtx_lock(&mm->unused_lock);
@@ -73,6 +72,11 @@
 	return child;
 }
 
+/* drm_mm_pre_get() - pre-allocate a drm_mm_node structure
+ * drm_mm:	memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
 int drm_mm_pre_get(struct drm_mm *mm)
 {
 	struct drm_mm_node *node;
@@ -80,7 +84,7 @@
 	mtx_lock(&mm->unused_lock);
 	while (mm->num_unused < MM_UNUSED_TARGET) {
 		mtx_unlock(&mm->unused_lock);
-		node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
+		node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
 		mtx_lock(&mm->unused_lock);
 
 		if (unlikely(node == NULL)) {
@@ -94,6 +98,7 @@
 	mtx_unlock(&mm->unused_lock);
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_pre_get);
 
 static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
@@ -111,39 +116,46 @@
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
-				 unsigned long size, unsigned alignment)
+				 unsigned long size, unsigned alignment,
+				 unsigned long color)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long tmp = 0, wasted = 0;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
 
-	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
+	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (alignment)
-		tmp = hole_start % alignment;
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
-	if (!tmp) {
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
+	}
+
+	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
-		list_del_init(&hole_node->hole_stack);
-	} else
-		wasted = alignment - tmp;
+		list_del(&hole_node->hole_stack);
+	}
 
-	node->start = hole_start + wasted;
+	node->start = adj_start;
 	node->size = size;
 	node->mm = mm;
+	node->color = color;
 	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
-	KASSERT(node->start + node->size <= hole_end, ("hole pos"));
+	BUG_ON(node->start + node->size > adj_end);
 
+	node->hole_follows = 0;
 	if (node->start + node->size < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
-	} else {
-		node->hole_follows = 0;
 	}
 }
 
@@ -150,6 +162,7 @@
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
+					     unsigned long color,
 					     int atomic)
 {
 	struct drm_mm_node *node;
@@ -158,66 +171,89 @@
 	if (unlikely(node == NULL))
 		return NULL;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment);
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
 
 	return node;
 }
+EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-		       unsigned long size, unsigned alignment)
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			       unsigned long size, unsigned alignment,
+			       unsigned long color)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	hole_node = drm_mm_search_free_generic(mm, size, alignment,
+					       color, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment);
-
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
+
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
 				       unsigned long size, unsigned alignment,
+				       unsigned long color,
 				       unsigned long start, unsigned long end)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long tmp = 0, wasted = 0;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
 
-	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
+	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (hole_start < start)
-		wasted += start - hole_start;
-	if (alignment)
-		tmp = (hole_start + wasted) % alignment;
+	if (adj_start < start)
+		adj_start = start;
+	if (adj_end > end)
+		adj_end = end;
 
-	if (tmp)
-		wasted += alignment - tmp;
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
-	if (!wasted) {
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
+	}
+
+	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
-		list_del_init(&hole_node->hole_stack);
+		list_del(&hole_node->hole_stack);
 	}
 
-	node->start = hole_start + wasted;
+	node->start = adj_start;
 	node->size = size;
 	node->mm = mm;
+	node->color = color;
 	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
-	KASSERT(node->start + node->size <= hole_end, ("hole_end"));
-	KASSERT(node->start + node->size <= end, ("end"));
+	BUG_ON(node->start + node->size > adj_end);
+	BUG_ON(node->start + node->size > end);
 
+	node->hole_follows = 0;
 	if (node->start + node->size < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
-	} else {
-		node->hole_follows = 0;
 	}
 }
 
@@ -224,6 +260,7 @@
 struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
 						unsigned long size,
 						unsigned alignment,
+						unsigned long color,
 						unsigned long start,
 						unsigned long end,
 						int atomic)
@@ -234,47 +271,66 @@
 	if (unlikely(node == NULL))
 		return NULL;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
 				   start, end);
 
 	return node;
 }
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-				unsigned long size, unsigned alignment,
-				unsigned long start, unsigned long end)
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
+ */
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long start, unsigned long end)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-						start, end, 0);
+	hole_node = drm_mm_search_free_in_range_generic(mm,
+							size, alignment, color,
+							start, end, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+	drm_mm_insert_helper_range(hole_node, node,
+				   size, alignment, color,
 				   start, end);
-
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
+/**
+ * Remove a memory node from the allocator.
+ */
 void drm_mm_remove_node(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
-	KASSERT(!node->scanned_block && !node->scanned_prev_free
-	    && !node->scanned_next_free, ("node"));
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+				   || node->scanned_next_free);
 
 	prev_node =
 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
 	if (node->hole_follows) {
-		KASSERT(drm_mm_hole_node_start(node)
-			!= drm_mm_hole_node_end(node), ("hole_follows"));
+		BUG_ON(drm_mm_hole_node_start(node)
+				== drm_mm_hole_node_end(node));
 		list_del(&node->hole_stack);
 	} else
-		KASSERT(drm_mm_hole_node_start(node)
-		       == drm_mm_hole_node_end(node), ("!hole_follows"));
+		BUG_ON(drm_mm_hole_node_start(node)
+				!= drm_mm_hole_node_end(node));
 
 	if (!prev_node->hole_follows) {
 		prev_node->hole_follows = 1;
@@ -285,14 +341,16 @@
 	list_del(&node->node_list);
 	node->allocated = 0;
 }
+EXPORT_SYMBOL(drm_mm_remove_node);
 
 /*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
  */
-
 void drm_mm_put_block(struct drm_mm_node *node)
 {
+
 	struct drm_mm *mm = node->mm;
 
 	drm_mm_remove_node(node);
@@ -305,12 +363,11 @@
 		free(node, DRM_MEM_MM);
 	mtx_unlock(&mm->unused_lock);
 }
+EXPORT_SYMBOL(drm_mm_put_block);
 
 static int check_free_hole(unsigned long start, unsigned long end,
 			   unsigned long size, unsigned alignment)
 {
-	unsigned wasted = 0;
-
 	if (end - start < size)
 		return 0;
 
@@ -317,33 +374,39 @@
 	if (alignment) {
 		unsigned tmp = start % alignment;
 		if (tmp)
-			wasted = alignment - tmp;
+			start += alignment - tmp;
 	}
 
-	if (end >= start + size + wasted) {
-		return 1;
-	}
-
-	return 0;
+	return end >= start + size;
 }
 
-
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-				       unsigned long size,
-				       unsigned alignment, int best_match)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+					       unsigned long size,
+					       unsigned alignment,
+					       unsigned long color,
+					       bool best_match)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
 	unsigned long best_size;
 
+	BUG_ON(mm->scanned_blocks);
+
 	best = NULL;
 	best_size = ~0UL;
 
 	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		KASSERT(entry->hole_follows, ("hole_follows"));
-		if (!check_free_hole(drm_mm_hole_node_start(entry),
-				     drm_mm_hole_node_end(entry),
-				     size, alignment))
+		unsigned long adj_start = drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry);
+
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
+		BUG_ON(!entry->hole_follows);
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
 		if (!best_match)
@@ -357,19 +420,21 @@
 
 	return best;
 }
+EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end,
-						int best_match)
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+							unsigned long size,
+							unsigned alignment,
+							unsigned long color,
+							unsigned long start,
+							unsigned long end,
+							bool best_match)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
 	unsigned long best_size;
 
-	KASSERT(!mm->scanned_blocks, ("scanned"));
+	BUG_ON(mm->scanned_blocks);
 
 	best = NULL;
 	best_size = ~0UL;
@@ -380,7 +445,14 @@
 		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
 			end : drm_mm_hole_node_end(entry);
 
-		KASSERT(entry->hole_follows, ("hole_follows"));
+		BUG_ON(!entry->hole_follows);
+
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -395,7 +467,11 @@
 
 	return best;
 }
+EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	list_replace(&old->node_list, &new->node_list);
@@ -404,50 +480,83 @@
 	new->mm = old->mm;
 	new->start = old->start;
 	new->size = old->size;
+	new->color = old->color;
 
 	old->allocated = 0;
 	new->allocated = 1;
 }
+EXPORT_SYMBOL(drm_mm_replace_node);
 
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
-		      unsigned alignment)
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm,
+		      unsigned long size,
+		      unsigned alignment,
+		      unsigned long color)
 {
+	mm->scan_color = color;
 	mm->scan_alignment = alignment;
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
-	mm->scan_hit_size = 0;
+	mm->scan_hit_end = 0;
 	mm->scan_check_range = 0;
 	mm->prev_scanned_node = NULL;
 }
+EXPORT_SYMBOL(drm_mm_init_scan);
 
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+				 unsigned long size,
 				 unsigned alignment,
+				 unsigned long color,
 				 unsigned long start,
 				 unsigned long end)
 {
+	mm->scan_color = color;
 	mm->scan_alignment = alignment;
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
-	mm->scan_hit_size = 0;
+	mm->scan_hit_end = 0;
 	mm->scan_start = start;
 	mm->scan_end = end;
 	mm->scan_check_range = 1;
 	mm->prev_scanned_node = NULL;
 }
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
 int drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 	unsigned long hole_start, hole_end;
-	unsigned long adj_start;
-	unsigned long adj_end;
+	unsigned long adj_start, adj_end;
 
 	mm->scanned_blocks++;
 
-	KASSERT(!node->scanned_block, ("node->scanned_block"));
+	BUG_ON(node->scanned_block);
 	node->scanned_block = 1;
 
 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
@@ -460,29 +569,45 @@
 	node->node_list.next = &mm->prev_scanned_node->node_list;
 	mm->prev_scanned_node = node;
 
-	hole_start = drm_mm_hole_node_start(prev_node);
-	hole_end = drm_mm_hole_node_end(prev_node);
+	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+
 	if (mm->scan_check_range) {
-		adj_start = hole_start < mm->scan_start ?
-			mm->scan_start : hole_start;
-		adj_end = hole_end > mm->scan_end ?
-			mm->scan_end : hole_end;
-	} else {
-		adj_start = hole_start;
-		adj_end = hole_end;
+		if (adj_start < mm->scan_start)
+			adj_start = mm->scan_start;
+		if (adj_end > mm->scan_end)
+			adj_end = mm->scan_end;
 	}
 
-	if (check_free_hole(adj_start , adj_end,
+	if (mm->color_adjust)
+		mm->color_adjust(prev_node, mm->scan_color,
+				 &adj_start, &adj_end);
+
+	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = hole_start;
-		mm->scan_hit_size = hole_end;
-
+		mm->scan_hit_end = hole_end;
 		return 1;
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_scan_add_block);
 
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed from the scan list in the exact reverse order of
+ * their addition, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just-freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
 int drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
@@ -490,7 +615,7 @@
 
 	mm->scanned_blocks--;
 
-	KASSERT(node->scanned_block, ("scanned_block"));
+	BUG_ON(!node->scanned_block);
 	node->scanned_block = 0;
 
 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
@@ -497,20 +622,12 @@
 			       node_list);
 
 	prev_node->hole_follows = node->scanned_preceeds_hole;
-	INIT_LIST_HEAD(&node->node_list);
 	list_add(&node->node_list, &prev_node->node_list);
 
-	/* Only need to check for containement because start&size for the
-	 * complete resulting free block (not just the desired part) is
-	 * stored. */
-	if (node->start >= mm->scan_hit_start &&
-	    node->start + node->size
-	    		<= mm->scan_hit_start + mm->scan_hit_size) {
-		return 1;
-	}
-
-	return 0;
+	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+		node->start < mm->scan_hit_end);
 }
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
@@ -518,6 +635,7 @@
 
 	return (head->next->next == head);
 }
+EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
@@ -527,6 +645,7 @@
 	mm->scanned_blocks = 0;
 	mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);
 
+	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
 	INIT_LIST_HEAD(&mm->head_node.hole_stack);
 	mm->head_node.hole_follows = 1;
@@ -538,8 +657,11 @@
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->color_adjust = NULL;
+
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
@@ -558,10 +680,9 @@
 	}
 	mtx_unlock(&mm->unused_lock);
 
-	mtx_destroy(&mm->unused_lock);
-
-	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
+	BUG_ON(mm->num_unused != 0);
 }
+EXPORT_SYMBOL(drm_mm_takedown);
 
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
@@ -573,13 +694,13 @@
 	hole_end = drm_mm_hole_node_end(&mm->head_node);
 	hole_size = hole_end - hole_start;
 	if (hole_size)
-		printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
 			prefix, hole_start, hole_end,
 			hole_size);
 	total_free += hole_size;
 
 	drm_mm_for_each_node(entry, mm) {
-		printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
 			prefix, entry->start, entry->start + entry->size,
 			entry->size);
 		total_used += entry->size;
@@ -588,7 +709,7 @@
 			hole_start = drm_mm_hole_node_start(entry);
 			hole_end = drm_mm_hole_node_end(entry);
 			hole_size = hole_end - hole_start;
-			printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
+			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
 				prefix, hole_start, hole_end,
 				hole_size);
 			total_free += hole_size;
@@ -596,6 +717,7 @@
 	}
 	total = total_free + total_used;
 
-	printf("%s total: %lu, used %lu free %lu\n", prefix, total,
+	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
 		total_used, total_free);
 }
+EXPORT_SYMBOL(drm_mm_debug_table);

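For reference, the eviction-scan API introduced above (drm_mm_init_scan(),
drm_mm_scan_add_block(), drm_mm_scan_remove_block()) is driven entirely by the
caller.  The following is a minimal sketch of that protocol; the "obj" type,
its lru/scan_link members and the evict_object() helper are illustrative
names, not part of this change:

	struct obj {
		struct drm_mm_node node;
		struct list_head lru;		/* driver-side LRU link */
		struct list_head scan_link;	/* temporary link for one scan */
	};

	static int
	evict_for_hole(struct drm_mm *mm, struct list_head *lru_list,
	    unsigned long size, unsigned alignment, unsigned long color)
	{
		struct list_head scanned;
		struct obj *o, *tmp;
		int found = 0;

		INIT_LIST_HEAD(&scanned);
		drm_mm_init_scan(mm, size, alignment, color);

		/* Feed candidates in LRU order until a suitable hole appears. */
		list_for_each_entry(o, lru_list, lru) {
			list_add(&o->scan_link, &scanned);
			if (drm_mm_scan_add_block(&o->node)) {
				found = 1;
				break;
			}
		}

		/*
		 * Every scanned node must be removed again, in the exact
		 * reverse order of addition; list_add() prepends, so a plain
		 * forward walk of "scanned" does that.  Nodes for which
		 * drm_mm_scan_remove_block() returns non-zero overlap the
		 * hole and are kept on the list for eviction.
		 */
		list_for_each_entry_safe(o, tmp, &scanned, scan_link) {
			if (drm_mm_scan_remove_block(&o->node))
				continue;
			list_del(&o->scan_link);
		}

		/* Scan list is empty now, so the flagged nodes may be freed. */
		list_for_each_entry_safe(o, tmp, &scanned, scan_link) {
			list_del(&o->scan_link);
			evict_object(o);	/* frees o->node from the mm */
		}

		return (found);
	}

i915_gem_evict.c in this import follows the same pattern.
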
Modified: trunk/sys/dev/drm2/drm_mm.h
===================================================================
--- trunk/sys/dev/drm2/drm_mm.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_mm.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -26,18 +26,20 @@
  *
  *
  **************************************************************************/
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_mm.h 249081 2013-04-04 05:39:37Z kib $");
-
 /*
  * Authors:
  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_mm.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
 #ifndef _DRM_MM_H_
 #define _DRM_MM_H_
 
+/*
+ * Generic range manager structs
+ */
 #include <dev/drm2/drm_linux_list.h>
 
 struct drm_mm_node {
@@ -49,14 +51,17 @@
 	unsigned scanned_next_free : 1;
 	unsigned scanned_preceeds_hole : 1;
 	unsigned allocated : 1;
+	unsigned long color;
 	unsigned long start;
 	unsigned long size;
 	struct drm_mm *mm;
-	void *private;
 };
 
 struct drm_mm {
+	/* List of all memory nodes that immediately precede a free hole. */
 	struct list_head hole_stack;
+	/* head_node.node_list is the list of all memory nodes, ordered
+	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
 	struct list_head unused_nodes;
 	int num_unused;
@@ -63,13 +68,17 @@
 	struct mtx unused_lock;
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
+	unsigned long scan_color;
 	unsigned long scan_size;
 	unsigned long scan_hit_start;
-	unsigned scan_hit_size;
+	unsigned long scan_hit_end;
 	unsigned scanned_blocks;
 	unsigned long scan_start;
 	unsigned long scan_end;
 	struct drm_mm_node *prev_scanned_node;
+
+	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+			     unsigned long *start, unsigned long *end);
 };
 
 static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
@@ -79,7 +88,7 @@
 
 static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
-	return (mm->hole_stack.next != NULL);
+	return mm->hole_stack.next;
 }
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
@@ -90,8 +99,7 @@
 			struct drm_mm_node, node_list) : NULL; \
 	     entry != NULL; entry = next, \
 		next = entry ? list_entry(entry->node_list.next, \
-			struct drm_mm_node, node_list) : NULL)
-
+			struct drm_mm_node, node_list) : NULL) \
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -98,11 +106,13 @@
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
+						    unsigned long color,
 						    int atomic);
 extern struct drm_mm_node *drm_mm_get_block_range_generic(
 						struct drm_mm_node *node,
 						unsigned long size,
 						unsigned alignment,
+						unsigned long color,
 						unsigned long start,
 						unsigned long end,
 						int atomic);
@@ -110,13 +120,13 @@
 						   unsigned long size,
 						   unsigned alignment)
 {
-	return drm_mm_get_block_generic(parent, size, alignment, 0);
+	return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
 }
 static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
 							  unsigned long size,
 							  unsigned alignment)
 {
-	return drm_mm_get_block_generic(parent, size, alignment, 1);
+	return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
 }
 static inline struct drm_mm_node *drm_mm_get_block_range(
 						struct drm_mm_node *parent,
@@ -125,9 +135,20 @@
 						unsigned long start,
 						unsigned long end)
 {
-	return drm_mm_get_block_range_generic(parent, size, alignment,
-						start, end, 0);
+	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+					      start, end, 0);
 }
+static inline struct drm_mm_node *drm_mm_get_color_block_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment, color,
+					      start, end, 0);
+}
 static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
 						struct drm_mm_node *parent,
 						unsigned long size,
@@ -135,38 +156,91 @@
 						unsigned long start,
 						unsigned long end)
 {
-	return drm_mm_get_block_range_generic(parent, size, alignment,
+	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
 						start, end, 1);
 }
-extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-			      unsigned long size, unsigned alignment);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+			      struct drm_mm_node *node,
+			      unsigned long size,
+			      unsigned alignment);
 extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
 				       struct drm_mm_node *node,
-				       unsigned long size, unsigned alignment,
-				       unsigned long start, unsigned long end);
+				       unsigned long size,
+				       unsigned alignment,
+				       unsigned long start,
+				       unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+				      struct drm_mm_node *node,
+				      unsigned long size,
+				      unsigned alignment,
+				      unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+				       struct drm_mm_node *node,
+				       unsigned long size,
+				       unsigned alignment,
+				       unsigned long color,
+				       unsigned long start,
+				       unsigned long end);
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern void drm_mm_remove_node(struct drm_mm_node *node);
 extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-					      unsigned long size,
-					      unsigned alignment,
-					      int best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range(
+extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+						      unsigned long size,
+						      unsigned alignment,
+						      unsigned long color,
+						      bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
 						const struct drm_mm *mm,
 						unsigned long size,
 						unsigned alignment,
+						unsigned long color,
 						unsigned long start,
 						unsigned long end,
-						int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+						bool best_match);
+static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+						     unsigned long size,
+						     unsigned alignment,
+						     bool best_match)
+{
+	return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range(
+						const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						bool best_match)
+{
+	return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
+						   start, end, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
+							   unsigned long size,
+							   unsigned alignment,
+							   unsigned long color,
+							   bool best_match)
+{
+	return drm_mm_search_free_generic(mm, size, alignment, color, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
+						const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						unsigned long start,
+						unsigned long end,
+						bool best_match)
+{
+	return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
+						   start, end, best_match);
+}
+extern int drm_mm_init(struct drm_mm *mm,
+		       unsigned long start,
 		       unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
-					 unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
-				    unsigned long size, int atomic);
 extern int drm_mm_pre_get(struct drm_mm *mm);
 
 static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
@@ -174,15 +248,19 @@
 	return block->mm;
 }
 
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
-		      unsigned alignment);
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan(struct drm_mm *mm,
+		      unsigned long size,
+		      unsigned alignment,
+		      unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+				 unsigned long size,
 				 unsigned alignment,
+				 unsigned long color,
 				 unsigned long start,
 				 unsigned long end);
 int drm_mm_scan_add_block(struct drm_mm_node *node);
 int drm_mm_scan_remove_block(struct drm_mm_node *node);
 
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 
 #endif

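The new color/color_adjust machinery declared above lets a driver shrink a
candidate hole based on the colors of its neighbours before the allocator
checks size and alignment.  A minimal sketch of such a callback, assuming a
one-page guard must separate nodes of different colors (GUARD and the function
name are illustrative):

	#define	GUARD	4096UL	/* assumed separation, for illustration */

	static void
	example_color_adjust(struct drm_mm_node *node, unsigned long color,
	    unsigned long *start, unsigned long *end)
	{
		struct drm_mm_node *next;

		/* "node" is the node immediately preceding the hole. */
		if (node->allocated && node->color != color)
			*start += GUARD;

		next = list_entry(node->node_list.next, struct drm_mm_node,
		    node_list);
		if (next->allocated && next->color != color)
			*end -= GUARD;
	}

A driver installs the hook after drm_mm_init() (which resets it to NULL) with
"mm->color_adjust = example_color_adjust;".  If the adjusted hole collapses
(adj_end <= adj_start), the search loop in drm_mm.c simply skips it.
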
Modified: trunk/sys/dev/drm2/drm_mode.h
===================================================================
--- trunk/sys/dev/drm2/drm_mode.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_mode.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -24,12 +24,14 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_mode.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_mode.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
 
 #ifndef _DRM_MODE_H
 #define _DRM_MODE_H
 
+#include <dev/drm2/drm_os_freebsd.h>
+
 #define DRM_DISPLAY_INFO_LEN	32
 #define DRM_CONNECTOR_NAME_LEN	32
 #define DRM_DISPLAY_MODE_LEN	32
@@ -85,41 +87,41 @@
 #define DRM_MODE_DIRTY_ANNOTATE 2
 
 struct drm_mode_modeinfo {
-	uint32_t clock;
-	uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
-	uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
+	__u32 clock;
+	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
+	__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
-	uint32_t vrefresh;
+	__u32 vrefresh;
 
-	uint32_t flags;
-	uint32_t type;
+	__u32 flags;
+	__u32 type;
 	char name[DRM_DISPLAY_MODE_LEN];
 };
 
 struct drm_mode_card_res {
-	uint64_t fb_id_ptr;
-	uint64_t crtc_id_ptr;
-	uint64_t connector_id_ptr;
-	uint64_t encoder_id_ptr;
-	uint32_t count_fbs;
-	uint32_t count_crtcs;
-	uint32_t count_connectors;
-	uint32_t count_encoders;
-	uint32_t min_width, max_width;
-	uint32_t min_height, max_height;
+	__u64 fb_id_ptr;
+	__u64 crtc_id_ptr;
+	__u64 connector_id_ptr;
+	__u64 encoder_id_ptr;
+	__u32 count_fbs;
+	__u32 count_crtcs;
+	__u32 count_connectors;
+	__u32 count_encoders;
+	__u32 min_width, max_width;
+	__u32 min_height, max_height;
 };
 
 struct drm_mode_crtc {
-	uint64_t set_connectors_ptr;
-	uint32_t count_connectors;
+	__u64 set_connectors_ptr;
+	__u32 count_connectors;
 
-	uint32_t crtc_id; /**< Id */
-	uint32_t fb_id; /**< Id of framebuffer */
+	__u32 crtc_id; /**< Id */
+	__u32 fb_id; /**< Id of framebuffer */
 
-	uint32_t x, y; /**< Position on the frameuffer */
+	__u32 x, y; /**< Position on the framebuffer */
 
-	uint32_t gamma_size;
-	uint32_t mode_valid;
+	__u32 gamma_size;
+	__u32 mode_valid;
 	struct drm_mode_modeinfo mode;
 };
 
@@ -128,36 +130,36 @@
 
 /* Planes blend with or override other bits on the CRTC */
 struct drm_mode_set_plane {
-	uint32_t plane_id;
-	uint32_t crtc_id;
-	uint32_t fb_id; /* fb object contains surface format type */
-	uint32_t flags; /* see above flags */
+	__u32 plane_id;
+	__u32 crtc_id;
+	__u32 fb_id; /* fb object contains surface format type */
+	__u32 flags; /* see above flags */
 
 	/* Signed dest location allows it to be partially off screen */
-	int32_t crtc_x, crtc_y;
-	uint32_t crtc_w, crtc_h;
+	__s32 crtc_x, crtc_y;
+	__u32 crtc_w, crtc_h;
 
 	/* Source values are 16.16 fixed point */
-	uint32_t src_x, src_y;
-	uint32_t src_h, src_w;
+	__u32 src_x, src_y;
+	__u32 src_h, src_w;
 };
 
 struct drm_mode_get_plane {
-	uint32_t plane_id;
+	__u32 plane_id;
 
-	uint32_t crtc_id;
-	uint32_t fb_id;
+	__u32 crtc_id;
+	__u32 fb_id;
 
-	uint32_t possible_crtcs;
-	uint32_t gamma_size;
+	__u32 possible_crtcs;
+	__u32 gamma_size;
 
-	uint32_t count_format_types;
-	uint64_t format_type_ptr;
+	__u32 count_format_types;
+	__u64 format_type_ptr;
 };
 
 struct drm_mode_get_plane_res {
-	uint64_t plane_id_ptr;
-	uint32_t count_planes;
+	__u64 plane_id_ptr;
+	__u32 count_planes;
 };
 
 #define DRM_MODE_ENCODER_NONE	0
@@ -165,15 +167,16 @@
 #define DRM_MODE_ENCODER_TMDS	2
 #define DRM_MODE_ENCODER_LVDS	3
 #define DRM_MODE_ENCODER_TVDAC	4
+#define DRM_MODE_ENCODER_VIRTUAL 5
 
 struct drm_mode_get_encoder {
-	uint32_t encoder_id;
-	uint32_t encoder_type;
+	__u32 encoder_id;
+	__u32 encoder_type;
 
-	uint32_t crtc_id; /**< Id of crtc */
+	__u32 crtc_id; /**< Id of crtc */
 
-	uint32_t possible_crtcs;
-	uint32_t possible_clones;
+	__u32 possible_crtcs;
+	__u32 possible_clones;
 };
 
 /* This is for connectors with multiple signal types. */
@@ -202,26 +205,27 @@
 #define DRM_MODE_CONNECTOR_HDMIB	12
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
+#define DRM_MODE_CONNECTOR_VIRTUAL      15
 
 struct drm_mode_get_connector {
 
-	uint64_t encoders_ptr;
-	uint64_t modes_ptr;
-	uint64_t props_ptr;
-	uint64_t prop_values_ptr;
+	__u64 encoders_ptr;
+	__u64 modes_ptr;
+	__u64 props_ptr;
+	__u64 prop_values_ptr;
 
-	uint32_t count_modes;
-	uint32_t count_props;
-	uint32_t count_encoders;
+	__u32 count_modes;
+	__u32 count_props;
+	__u32 count_encoders;
 
-	uint32_t encoder_id; /**< Current Encoder */
-	uint32_t connector_id; /**< Id */
-	uint32_t connector_type;
-	uint32_t connector_type_id;
+	__u32 encoder_id; /**< Current Encoder */
+	__u32 connector_id; /**< Id */
+	__u32 connector_type;
+	__u32 connector_type_id;
 
-	uint32_t connection;
-	uint32_t mm_width, mm_height; /**< HxW in millimeters */
-	uint32_t subpixel;
+	__u32 connection;
+	__u32 mm_width, mm_height; /**< HxW in millimeters */
+	__u32 subpixel;
 };
 
 #define DRM_MODE_PROP_PENDING	(1<<0)
@@ -229,53 +233,69 @@
 #define DRM_MODE_PROP_IMMUTABLE	(1<<2)
 #define DRM_MODE_PROP_ENUM	(1<<3) /* enumerated type with text strings */
 #define DRM_MODE_PROP_BLOB	(1<<4)
+#define DRM_MODE_PROP_BITMASK	(1<<5) /* bitmask of enumerated types */
 
 struct drm_mode_property_enum {
-	uint64_t value;
+	__u64 value;
 	char name[DRM_PROP_NAME_LEN];
 };
 
 struct drm_mode_get_property {
-	uint64_t values_ptr; /* values and blob lengths */
-	uint64_t enum_blob_ptr; /* enum and blob id ptrs */
+	__u64 values_ptr; /* values and blob lengths */
+	__u64 enum_blob_ptr; /* enum and blob id ptrs */
 
-	uint32_t prop_id;
-	uint32_t flags;
+	__u32 prop_id;
+	__u32 flags;
 	char name[DRM_PROP_NAME_LEN];
 
-	uint32_t count_values;
-	uint32_t count_enum_blobs;
+	__u32 count_values;
+	__u32 count_enum_blobs;
 };
 
 struct drm_mode_connector_set_property {
-	uint64_t value;
-	uint32_t prop_id;
-	uint32_t connector_id;
+	__u64 value;
+	__u32 prop_id;
+	__u32 connector_id;
 };
 
+struct drm_mode_obj_get_properties {
+	__u64 props_ptr;
+	__u64 prop_values_ptr;
+	__u32 count_props;
+	__u32 obj_id;
+	__u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+	__u64 value;
+	__u32 prop_id;
+	__u32 obj_id;
+	__u32 obj_type;
+};
+
 struct drm_mode_get_blob {
-	uint32_t blob_id;
-	uint32_t length;
-	uint64_t data;
+	__u32 blob_id;
+	__u32 length;
+	__u64 data;
 };
 
 struct drm_mode_fb_cmd {
-	uint32_t fb_id;
-	uint32_t width, height;
-	uint32_t pitch;
-	uint32_t bpp;
-	uint32_t depth;
+	__u32 fb_id;
+	__u32 width, height;
+	__u32 pitch;
+	__u32 bpp;
+	__u32 depth;
 	/* driver specific handle */
-	uint32_t handle;
+	__u32 handle;
 };
 
-#define DRM_MODE_FB_INTERLACED	(1<<0 /* for interlaced framebuffers */
+#define DRM_MODE_FB_INTERLACED	(1<<0) /* for interlaced framebuffers */
 
 struct drm_mode_fb_cmd2 {
-	uint32_t fb_id;
-	uint32_t width, height;
-	uint32_t pixel_format; /* fourcc code from drm_fourcc.h */
-	uint32_t flags; /* see above flags */
+	__u32 fb_id;
+	__u32 width, height;
+	__u32 pixel_format; /* fourcc code from drm_fourcc.h */
+	__u32 flags; /* see above flags */
 
 	/*
 	 * In case of planar formats, this ioctl allows up to 4
@@ -291,9 +311,9 @@
 	 * offset[1].  Note that offset[0] will generally
 	 * be 0.
 	 */
-	uint32_t handles[4];
-	uint32_t pitches[4]; /* pitch for each plane */
-	uint32_t offsets[4]; /* offset of each plane */
+	__u32 handles[4];
+	__u32 pitches[4]; /* pitch for each plane */
+	__u32 offsets[4]; /* offset of each plane */
 };
 
 #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -330,23 +350,24 @@
  */
 
 struct drm_mode_fb_dirty_cmd {
-	uint32_t fb_id;
-	uint32_t flags;
-	uint32_t color;
-	uint32_t num_clips;
-	uint64_t clips_ptr;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 color;
+	__u32 num_clips;
+	__u64 clips_ptr;
 };
 
 struct drm_mode_mode_cmd {
-	uint32_t connector_id;
+	__u32 connector_id;
 	struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_CURSOR_BO	(1<<0)
-#define DRM_MODE_CURSOR_MOVE	(1<<1)
+#define DRM_MODE_CURSOR_BO	0x01
+#define DRM_MODE_CURSOR_MOVE	0x02
+#define DRM_MODE_CURSOR_FLAGS	0x03
 
 /*
- * depending on the value in flags diffrent members are used.
+ * depending on the value in flags different members are used.
  *
  * CURSOR_BO uses
  *    crtc
@@ -360,24 +381,24 @@
  *    y
  */
 struct drm_mode_cursor {
-	uint32_t flags;
-	uint32_t crtc_id;
-	int32_t x;
-	int32_t y;
-	uint32_t width;
-	uint32_t height;
+	__u32 flags;
+	__u32 crtc_id;
+	__s32 x;
+	__s32 y;
+	__u32 width;
+	__u32 height;
 	/* driver specific handle */
-	uint32_t handle;
+	__u32 handle;
 };
 
 struct drm_mode_crtc_lut {
-	uint32_t crtc_id;
-	uint32_t gamma_size;
+	__u32 crtc_id;
+	__u32 gamma_size;
 
 	/* pointers to arrays */
-	uint64_t red;
-	uint64_t green;
-	uint64_t blue;
+	__u64 red;
+	__u64 green;
+	__u64 blue;
 };
 
 #define DRM_MODE_PAGE_FLIP_EVENT 0x01
@@ -406,11 +427,11 @@
  */
 
 struct drm_mode_crtc_page_flip {
-	uint32_t crtc_id;
-	uint32_t fb_id;
-	uint32_t flags;
-	uint32_t reserved;
-	uint64_t user_data;
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 reserved;
+	__u64 user_data;
 };
 
 /* create a dumb scanout buffer */
@@ -428,14 +449,14 @@
 /* set up for mmap of a dumb scanout buffer */
 struct drm_mode_map_dumb {
 	/** Handle for the object being mapped. */
-	uint32_t handle;
-	uint32_t pad;
+	__u32 handle;
+	__u32 pad;
 	/**
 	 * Fake offset to use for subsequent mmap call
 	 *
 	 * This is a fixed-size type for 32/64 compatibility.
 	 */
-	uint64_t offset;
+	__u64 offset;
 };
 
 struct drm_mode_destroy_dumb {

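The uint32_t/uint64_t to __u32/__u64 conversion above keeps this header
directly diffable against its Linux counterpart without changing the ioctl
ABI: the Linux-style names are plain fixed-width typedefs.  Roughly what the
newly included dev/drm2/drm_os_freebsd.h is expected to provide for them (a
sketch, not the complete compat header):

	#include <sys/types.h>

	typedef int8_t		__s8;
	typedef uint8_t		__u8;
	typedef int16_t		__s16;
	typedef uint16_t	__u16;
	typedef int32_t		__s32;
	typedef uint32_t	__u32;
	typedef int64_t		__s64;
	typedef uint64_t	__u64;
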
Modified: trunk/sys/dev/drm2/drm_modes.c
===================================================================
--- trunk/sys/dev/drm2/drm_modes.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_modes.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -32,14 +32,11 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_modes.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_modes.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
 #include <dev/drm2/drm_crtc.h>
 
-#define	KHZ2PICOS(a)	(1000000000UL/(a))
-
 /**
  * drm_mode_debug_printmodeline - debug print a mode
  * @dev: DRM device
@@ -50,7 +47,7 @@
  *
  * Describe @mode using DRM_DEBUG.
  */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
 {
 	DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
 			"0x%x 0x%x\n",
@@ -60,6 +57,7 @@
 		mode->vdisplay, mode->vsync_start,
 		mode->vsync_end, mode->vtotal, mode->type, mode->flags);
 }
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
 
 /**
  * drm_cvt_mode -create a modeline based on CVT algorithm
@@ -279,6 +277,7 @@
 
 	return drm_mode;
 }
+EXPORT_SYMBOL(drm_cvt_mode);
 
 /**
  * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
@@ -464,6 +463,7 @@
 
 	return drm_mode;
 }
+EXPORT_SYMBOL(drm_gtf_mode_complex);
 
 /**
  * drm_gtf_mode - create the modeline based on GTF algorithm
@@ -503,6 +503,7 @@
 	return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
 				    margins, 600, 40 * 2, 128, 20 * 2);
 }
+EXPORT_SYMBOL(drm_gtf_mode);
 
 /**
  * drm_mode_set_name - set the name on a mode
@@ -521,6 +522,7 @@
 		 mode->hdisplay, mode->vdisplay,
 		 interlaced ? "i" : "");
 }
+EXPORT_SYMBOL(drm_mode_set_name);
 
 /**
  * drm_mode_list_concat - move modes from one list to another
@@ -541,6 +543,7 @@
 		list_move_tail(entry, new);
 	}
 }
+EXPORT_SYMBOL(drm_mode_list_concat);
 
 /**
  * drm_mode_width - get the width of a mode
@@ -556,11 +559,12 @@
  * RETURNS:
  * @mode->hdisplay
  */
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
 {
 	return mode->hdisplay;
 
 }
+EXPORT_SYMBOL(drm_mode_width);
 
 /**
  * drm_mode_height - get the height of a mode
@@ -576,10 +580,11 @@
  * RETURNS:
  * @mode->vdisplay
  */
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
 {
 	return mode->vdisplay;
 }
+EXPORT_SYMBOL(drm_mode_height);
 
 /** drm_mode_hsync - get the hsync of a mode
  * @mode: mode
@@ -605,6 +610,7 @@
 
 	return calc_val;
 }
+EXPORT_SYMBOL(drm_mode_hsync);
 
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
@@ -646,6 +652,7 @@
 	}
 	return refresh;
 }
+EXPORT_SYMBOL(drm_mode_vrefresh);
 
 /**
  * drm_mode_set_crtcinfo - set CRTC modesetting parameters
@@ -679,8 +686,6 @@
 			p->crtc_vsync_end /= 2;
 			p->crtc_vtotal /= 2;
 		}
-
-		p->crtc_vtotal |= 1;
 	}
 
 	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
@@ -701,13 +706,32 @@
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
 	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
-	p->crtc_hadjusted = false;
-	p->crtc_vadjusted = false;
 }
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
 
 /**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+	int id = dst->base.id;
+
+	*dst = *src;
+	dst->base.id = id;
+	INIT_LIST_HEAD(&dst->head);
+}
+EXPORT_SYMBOL(drm_mode_copy);
+
+/**
  * drm_mode_duplicate - allocate and duplicate an existing mode
  * @m: mode to duplicate
  *
@@ -721,18 +745,16 @@
 					    const struct drm_display_mode *mode)
 {
 	struct drm_display_mode *nmode;
-	int new_id;
 
 	nmode = drm_mode_create(dev);
 	if (!nmode)
 		return NULL;
 
-	new_id = nmode->base.id;
-	*nmode = *mode;
-	nmode->base.id = new_id;
-	INIT_LIST_HEAD(&nmode->head);
+	drm_mode_copy(nmode, mode);
+
 	return nmode;
 }
+EXPORT_SYMBOL(drm_mode_duplicate);
 
 /**
  * drm_mode_equal - test modes for equality
@@ -745,9 +767,9 @@
  * Check to see if @mode1 and @mode2 are equivalent.
  *
  * RETURNS:
- * true if the modes are equal, false otherwise.
+ * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
 {
 	/* do clock check convert to PICOS so fb modes get matched
 	 * the same */
@@ -772,6 +794,7 @@
 
 	return false;
 }
+EXPORT_SYMBOL(drm_mode_equal);
 
 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -805,6 +828,7 @@
 			mode->status = MODE_VIRTUAL_Y;
 	}
 }
+EXPORT_SYMBOL(drm_mode_validate_size);
 
 /**
  * drm_mode_validate_clocks - validate modes against clock limits
@@ -841,6 +865,7 @@
 			mode->status = MODE_CLOCK_RANGE;
 	}
 }
+EXPORT_SYMBOL(drm_mode_validate_clocks);
 
 /**
  * drm_mode_prune_invalid - remove invalid modes from mode list
@@ -872,6 +897,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(drm_mode_prune_invalid);
 
 /**
  * drm_mode_compare - compare modes for favorability
@@ -902,6 +928,11 @@
 	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
 	if (diff)
 		return diff;
+
+	diff = b->vrefresh - a->vrefresh;
+	if (diff)
+		return diff;
+
 	diff = b->clock - a->clock;
 	return diff;
 }
@@ -919,6 +950,7 @@
 {
 	drm_list_sort(NULL, mode_list, drm_mode_compare);
 }
+EXPORT_SYMBOL(drm_mode_sort);
 
 /**
  * drm_mode_connector_list_update - update the mode list for the connector
@@ -960,6 +992,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(drm_mode_connector_list_update);
 
 /**
  * drm_mode_parse_command_line_for_connector - parse command line for connector
@@ -987,7 +1020,7 @@
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
 
-#ifdef XXX_CONFIG_FB
+#ifdef CONFIG_FB
 	if (!mode_option)
 		mode_option = fb_mode_option;
 #endif
@@ -1004,7 +1037,7 @@
 		case '@':
 			if (!refresh_specified && !bpp_specified &&
 			    !yres_specified && !cvt && !rb && was_digit) {
-				refresh = strtol(&name[i+1], NULL, 10);
+				refresh = simple_strtol(&name[i+1], NULL, 10);
 				refresh_specified = true;
 				was_digit = false;
 			} else
@@ -1013,7 +1046,7 @@
 		case '-':
 			if (!bpp_specified && !yres_specified && !cvt &&
 			    !rb && was_digit) {
-				bpp = strtol(&name[i+1], NULL, 10);
+				bpp = simple_strtol(&name[i+1], NULL, 10);
 				bpp_specified = true;
 				was_digit = false;
 			} else
@@ -1021,7 +1054,7 @@
 			break;
 		case 'x':
 			if (!yres_specified && was_digit) {
-				yres = strtol(&name[i+1], NULL, 10);
+				yres = simple_strtol(&name[i+1], NULL, 10);
 				yres_specified = true;
 				was_digit = false;
 			} else
@@ -1081,7 +1114,7 @@
 
 	if (i < 0 && yres_specified) {
 		char *ch;
-		xres = strtol(name, &ch, 10);
+		xres = simple_strtol(name, &ch, 10);
 		if ((ch != NULL) && (*ch == 'x'))
 			res_specified = true;
 		else
@@ -1092,7 +1125,8 @@
 	}
 done:
 	if (i >= 0) {
-		printf("parse error at position %i in video mode '%s'\n",
+		DRM_WARNING(
+			"parse error at position %i in video mode '%s'\n",
 			i, name);
 		mode->specified = false;
 		return false;
@@ -1121,6 +1155,7 @@
 
 	return true;
 }
+EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
 
 struct drm_display_mode *
 drm_mode_create_from_cmdline_mode(struct drm_device *dev,
@@ -1146,3 +1181,4 @@
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 }
+EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);

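As a usage sketch for the command-line parser above (dev and connector are
assumed to come from the caller): drm_mode_parse_command_line_for_connector()
fills a struct drm_cmdline_mode from a video-mode string, and
drm_mode_create_from_cmdline_mode() turns that into a full display mode.

	static struct drm_display_mode *
	pick_forced_mode(struct drm_device *dev,
	    struct drm_connector *connector, const char *option)
	{
		struct drm_cmdline_mode cmdline;

		/* Accepts strings such as "1024x768", "1920x1080@60" or
		 * "1280x1024-16@75" (bpp after '-', refresh after '@'). */
		if (!drm_mode_parse_command_line_for_connector(option,
		    connector, &cmdline))
			return (NULL);

		return (drm_mode_create_from_cmdline_mode(dev, &cmdline));
	}
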
Modified: trunk/sys/dev/drm2/drm_pci.c
===================================================================
--- trunk/sys/dev/drm2/drm_pci.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_pci.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,6 +1,21 @@
 /* $MidnightBSD$ */
-/*-
- * Copyright 2003 Eric Anholt.
+/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -17,23 +32,21 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_pci.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_pci.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
-/**
- * \file drm_pci.h
- * \brief PCI consistent, DMA-accessible memory allocation.
- *
- * \author Eric Anholt <anholt at FreeBSD.org>
- */
-
 #include <dev/drm2/drmP.h>
 
+static int drm_msi = 1;	/* Enable by default. */
+SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
+SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
+    "Enable MSI interrupts for drm devices");
+
 /**********************************************************************/
 /** \name PCI memory */
 /*@{*/
@@ -51,12 +64,10 @@
 }
 
 /**
- * \brief Allocate a physically contiguous DMA-accessible consistent 
- * memory block.
+ * \brief Allocate a PCI consistent memory block, for DMA.
  */
-drm_dma_handle_t *
-drm_pci_alloc(struct drm_device *dev, size_t size,
-	      size_t align, dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size,
+    size_t align, dma_addr_t maxaddr)
 {
 	drm_dma_handle_t *dmah;
 	int ret;
@@ -77,7 +88,9 @@
 	if (mtx_owned(&dev->dma_lock))
 	    DRM_ERROR("called while holding dma_lock\n");
 
-	ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
+	ret = bus_dma_tag_create(
+	    bus_get_dma_tag(dev->dev), /* parent */
+	    align, 0, /* align, boundary */
 	    maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
 	    NULL, NULL, /* filtfunc, filtfuncargs */
 	    size, 1, size, /* maxsize, nsegs, maxsegsize */
@@ -108,11 +121,14 @@
 	return dmah;
 }
 
+EXPORT_SYMBOL(drm_pci_alloc);
+
 /**
- * \brief Free a DMA-accessible consistent memory block.
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
  */
-void
-drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
 	if (dmah == NULL)
 		return;
@@ -119,8 +135,358 @@
 
 	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
 	bus_dma_tag_destroy(dmah->tag);
+}
 
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+	__drm_pci_free(dev, dmah);
 	free(dmah, DRM_MEM_DMA);
 }
 
-/*@}*/
+EXPORT_SYMBOL(drm_pci_free);
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+	return dev->pci_domain;
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+
+	if (dev->irqr)
+		return (dev->irq);
+
+	dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
+	    &dev->irqrid, RF_SHAREABLE);
+	if (!dev->irqr) {
+		dev_err(dev->dev, "Failed to allocate IRQ\n");
+		return (0);
+	}
+
+	dev->irq = (int) rman_get_start(dev->irqr);
+
+	return (dev->irq);
+}
+
+static void drm_pci_free_irq(struct drm_device *dev)
+{
+	if (dev->irqr == NULL)
+		return;
+
+	bus_release_resource(dev->dev, SYS_RES_IRQ,
+	    dev->irqrid, dev->irqr);
+
+	dev->irqr = NULL;
+	dev->irq = 0;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+	return dev->driver->name;
+}
+
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+	int len, ret;
+	master->unique_len = 40;
+	master->unique_size = master->unique_len;
+	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_NOWAIT);
+	if (master->unique == NULL)
+		return -ENOMEM;
+
+	len = snprintf(master->unique, master->unique_len,
+		       "pci:%04x:%02x:%02x.%d",
+		       dev->pci_domain,
+		       dev->pci_bus,
+		       dev->pci_slot,
+		       dev->pci_func);
+
+	if (len >= master->unique_len) {
+		DRM_ERROR("buffer overflow");
+		ret = -EINVAL;
+		goto err;
+	} else
+		master->unique_len = len;
+
+	return 0;
+err:
+	return ret;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+		       struct drm_master *master,
+		       struct drm_unique *u)
+{
+	int domain, bus, slot, func, ret;
+
+	master->unique_len = u->unique_len;
+	master->unique_size = u->unique_len + 1;
+	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_WAITOK);
+	if (!master->unique) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	master->unique[master->unique_len] = '\0';
+
+	/* Return error if the busid submitted doesn't match the device's actual
+	 * busid.
+	 */
+	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+	if (ret != 3) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	domain = bus >> 8;
+	bus &= 0xff;
+
+	if ((domain != dev->pci_domain) ||
+	    (bus != dev->pci_bus) ||
+	    (slot != dev->pci_slot) ||
+	    (func != dev->pci_func)) {
+		ret = -EINVAL;
+		goto err;
+	}
+	return 0;
+err:
+	return ret;
+}
+
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+	    (p->busnum & 0xff) != dev->pci_bus ||
+	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
+		return -EINVAL;
+
+	p->irq = dev->irq;
+
+	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+		  p->irq);
+	return 0;
+}
+
+int drm_pci_agp_init(struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev)) {
+		if (drm_pci_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
+		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+		    && (dev->agp == NULL)) {
+			DRM_ERROR("Cannot initialize the agpgart module.\n");
+			return -EINVAL;
+		}
+		if (drm_core_has_MTRR(dev)) {
+			if (dev->agp && dev->agp->agp_info.ai_aperture_base != 0) {
+				if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
+				    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
+					dev->agp->agp_mtrr = 1;
+				else
+					dev->agp->agp_mtrr = -1;
+			}
+		}
+	}
+	return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+	.bus_type = DRIVER_BUS_PCI,
+	.get_irq = drm_pci_get_irq,
+	.free_irq = drm_pci_free_irq,
+	.get_name = drm_pci_get_name,
+	.set_busid = drm_pci_set_busid,
+	.set_unique = drm_pci_set_unique,
+	.irq_by_busid = drm_pci_irq_by_busid,
+	.agp_init = drm_pci_agp_init,
+};
+
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempts to get inter-module "drm" information. If we are first,
+ * then register the character device and inter-module information.
+ * Try to register; if we fail to register, back out the previous work.
+ */
+int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
+		    struct drm_driver *driver)
+{
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	driver->bus = &drm_pci_bus;
+
+	dev->dev = kdev;
+
+	dev->pci_domain = pci_get_domain(dev->dev);
+	dev->pci_bus = pci_get_bus(dev->dev);
+	dev->pci_slot = pci_get_slot(dev->dev);
+	dev->pci_func = pci_get_function(dev->dev);
+
+	dev->pci_vendor = pci_get_vendor(dev->dev);
+	dev->pci_device = pci_get_device(dev->dev);
+	dev->pci_subvendor = pci_get_subvendor(dev->dev);
+	dev->pci_subdevice = pci_get_subdevice(dev->dev);
+
+	sx_xlock(&drm_global_mutex);
+
+	if ((ret = drm_fill_in_dev(dev, driver))) {
+		DRM_ERROR("Failed to fill in dev: %d\n", ret);
+		goto err_g1;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_g2;
+	}
+
+	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+		goto err_g3;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev,
+		    dev->id_entry->driver_private);
+		if (ret)
+			goto err_g4;
+	}
+
+	/* setup the grouping for the legacy output */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+						&dev->primary->mode_group);
+		if (ret)
+			goto err_g5;
+	}
+
+#ifdef FREEBSD_NOTYET
+	list_add_tail(&dev->driver_item, &driver->device_list);
+#endif /* FREEBSD_NOTYET */
+
+	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, device_get_nameunit(dev->dev), dev->primary->index);
+
+	sx_xunlock(&drm_global_mutex);
+	return 0;
+
+err_g5:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+err_g4:
+	drm_put_minor(&dev->primary);
+err_g3:
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+err_g2:
+	drm_cancel_fill_in_dev(dev);
+err_g1:
+	sx_xunlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
+
+int
+drm_pci_enable_msi(struct drm_device *dev)
+{
+	int msicount, ret;
+
+	if (!drm_msi)
+		return (-ENOENT);
+
+	msicount = pci_msi_count(dev->dev);
+	DRM_DEBUG("MSI count = %d\n", msicount);
+	if (msicount > 1)
+		msicount = 1;
+
+	ret = pci_alloc_msi(dev->dev, &msicount);
+	if (ret == 0) {
+		DRM_INFO("MSI enabled %d message(s)\n", msicount);
+		dev->msi_enabled = 1;
+		dev->irqrid = 1;
+	}
+
+	return (-ret);
+}
+
+void
+drm_pci_disable_msi(struct drm_device *dev)
+{
+
+	if (!dev->msi_enabled)
+		return;
+
+	pci_release_msi(dev->dev);
+	dev->msi_enabled = 0;
+	dev->irqrid = 0;
+}
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+	device_t root;
+	int pos;
+	u32 lnkcap = 0, lnkcap2 = 0;
+
+	*mask = 0;
+	if (!drm_pci_device_is_pcie(dev))
+		return -EINVAL;
+
+	root =
+	    device_get_parent( /* pcib             */
+	    device_get_parent( /* `-- pci          */
+	    device_get_parent( /*     `-- vgapci   */
+	    dev->dev)));       /*         `-- drmn */
+
+	pos = 0;
+	pci_find_cap(root, PCIY_EXPRESS, &pos);
+	if (!pos)
+		return -EINVAL;
+
+	/* we've been informed VIA and ServerWorks don't make the cut */
+	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
+	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
+		return -EINVAL;
+
+	lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
+	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);
+
+	lnkcap &= PCIEM_LINK_CAP_MAX_SPEED;
+	lnkcap2 &= 0xfe;
+
+#define	PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
+#define	PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
+#define	PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */
+
+	if (lnkcap2) { /* PCIE GEN 3.0 */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			*mask |= DRM_PCIE_SPEED_50;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			*mask |= DRM_PCIE_SPEED_80;
+	} else {
+		if (lnkcap & 1)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap & 2)
+			*mask |= DRM_PCIE_SPEED_50;
+	}
+
+	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
+	return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);

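Two caller-facing notes on the PCI rework above.  First, hw.drm.msi is
declared CTLFLAG_RDTUN, so MSI can only be disabled from the loader (e.g.
hw.drm.msi="0" in /boot/loader.conf), not at runtime.  Second, a sketch of how
a driver is expected to consume drm_pcie_get_speed_cap_mask(); the surrounding
driver logic is not part of this commit:

	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0) {
		if (mask & DRM_PCIE_SPEED_80)
			DRM_INFO("PCIe link supports 8.0 GT/s (gen 3)\n");
		else if (mask & DRM_PCIE_SPEED_50)
			DRM_INFO("PCIe link supports 5.0 GT/s (gen 2)\n");
		else if (mask & DRM_PCIE_SPEED_25)
			DRM_INFO("PCIe link limited to 2.5 GT/s (gen 1)\n");
	}
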
Modified: trunk/sys/dev/drm2/drm_pciids.h
===================================================================
--- trunk/sys/dev/drm2/drm_pciids.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_pciids.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,15 +1,356 @@
 /* $MidnightBSD$ */
 /*
- * $FreeBSD: release/9.2.0/sys/dev/drm2/drm_pciids.h 237662 2012-06-27 21:05:21Z gavin $
+ * $FreeBSD: stable/10/sys/dev/drm2/drm_pciids.h 282199 2015-04-28 19:35:05Z dumbbell $
  */
+
 /*
-   This file is auto-generated from the drm_pciids.txt in the DRM CVS
-   Please contact dri-devel at lists.sf.net to add new cards to this list
-*/
+ * Generated by gen-drm_pciids from:
+ *   o  previous FreeBSD's drm_pciids.h
+ *   o  Linux' drm_pciids.h
+ *   o  the PCI ID repository (http://pciids.sourceforge.net/)
+ *
+ * See tools/tools/drm/gen-drm_pciids.
+ */
+
+#define ffb_PCI_IDS \
+	{0, 0, 0, NULL}
+
+#define gamma_PCI_IDS \
+	{0x3D3D, 0x0008, 0, "3DLabs GLINT Gamma G1"}, \
+	{0, 0, 0, NULL}
+
+#define i810_PCI_IDS \
+	{0x8086, 0x1132, 0, "Intel i815 GMCH"}, \
+	{0x8086, 0x7121, 0, "Intel i810 GMCH"}, \
+	{0x8086, 0x7123, 0, "Intel i810-DC100 GMCH"}, \
+	{0x8086, 0x7125, 0, "Intel i810E GMCH"}, \
+	{0, 0, 0, NULL}
+
+#define i830_PCI_IDS \
+	{0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
+	{0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
+	{0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
+	{0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
+	{0, 0, 0, NULL}
+
+#define i915_PCI_IDS \
+	{0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
+	{0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
+	{0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+	{0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+	{0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+	{0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+	{0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+	{0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+	{0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+	{0x8086, 0x0152, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
+	{0x8086, 0x0156, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
+	{0x8086, 0x015A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
+	{0x8086, 0x0162, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
+	{0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
+	{0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
+	{0x8086, 0x0402, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
+	{0x8086, 0x0406, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
+	{0x8086, 0x040A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
+	{0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
+	{0x8086, 0x0416, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
+	{0x8086, 0x041A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
+	{0x8086, 0x0C16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \
+	{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
+	{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
+	{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
+	{0x8086, 0x258A, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
+	{0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
+	{0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
+	{0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
+	{0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
+	{0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
+	{0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+	{0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
+	{0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+	{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
+	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+	{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
+	{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
+	{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+	{0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
+	{0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
+	{0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
+	{0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
+	{0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
+	{0x8086, 0x2E42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
+	{0x8086, 0x2E92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
+	{0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
+	{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+	{0x8086, 0x358E, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
+	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
+	{0, 0, 0, NULL}
+
+#define imagine_PCI_IDS \
+	{0x105D, 0x2309, IMAGINE_128, "Imagine 128"}, \
+	{0x105D, 0x2339, IMAGINE_128_2, "Imagine 128-II"}, \
+	{0x105D, 0x493D, IMAGINE_T2R, "Ticket to Ride"}, \
+	{0x105D, 0x5348, IMAGINE_REV4, "Revolution IV"}, \
+	{0, 0, 0, NULL}
+
+#define mach64_PCI_IDS \
+	{0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
+	{0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
+	{0x1002, 0x4749, 0, "3D Rage Pro"}, \
+	{0x1002, 0x474C, 0, "Rage XC"}, \
+	{0x1002, 0x474D, 0, "Rage XL AGP 2X"}, \
+	{0x1002, 0x474E, 0, "Rage XC AGP"}, \
+	{0x1002, 0x474F, 0, "Rage XL"}, \
+	{0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
+	{0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
+	{0x1002, 0x4752, 0, "Rage XL"}, \
+	{0x1002, 0x4753, 0, "Rage XC"}, \
+	{0x1002, 0x4C42, 0, "3D Rage LT Pro AGP-133"}, \
+	{0x1002, 0x4C44, 0, "3D Rage LT Pro AGP-66"}, \
+	{0x1002, 0x4C49, 0, "3D Rage LT Pro"}, \
+	{0x1002, 0x4C4D, 0, "Rage Mobility P/M AGP 2X"}, \
+	{0x1002, 0x4C4E, 0, "Rage Mobility L AGP 2X"}, \
+	{0x1002, 0x4C50, 0, "3D Rage LT Pro"}, \
+	{0x1002, 0x4C51, 0, "3D Rage LT Pro"}, \
+	{0x1002, 0x4C52, 0, "Rage Mobility P/M"}, \
+	{0x1002, 0x4C53, 0, "Rage Mobility L"}, \
+	{0, 0, 0, NULL}
+
+#define mga_PCI_IDS \
+	{0x102B, 0x0520, MGA_CARD_TYPE_G200, "Matrox G200 (PCI)"}, \
+	{0x102B, 0x0521, MGA_CARD_TYPE_G200, "Matrox G200 (AGP)"}, \
+	{0x102B, 0x0525, MGA_CARD_TYPE_G400, "Matrox G400/G450 (AGP)"}, \
+	{0x102B, 0x2527, MGA_CARD_TYPE_G550, "Matrox G550 (AGP)"}, \
+	{0, 0, 0, NULL}
+
+#define nv_PCI_IDS \
+	{0x10DE, 0x0020, NV04, "NVidia RIVA TNT"}, \
+	{0x10DE, 0x0028, NV04, "NVidia RIVA TNT2"}, \
+	{0x10DE, 0x0029, NV04, "NVidia RIVA TNT2 Ultra"}, \
+	{0x10DE, 0x002A, NV04, "NVidia Unknown TNT2"}, \
+	{0x10DE, 0x002C, NV04, "NVidia Vanta"}, \
+	{0x10DE, 0x002D, NV04, "NVidia RIVA TNT2 Model 64"}, \
+	{0x10DE, 0x0040, NV40, "NVidia GeForce 6800 Ultra"}, \
+	{0x10DE, 0x0041, NV40, "NVidia GeForce 6800"}, \
+	{0x10DE, 0x0042, NV40, "NVidia GeForce 6800 LE"}, \
+	{0x10DE, 0x0043, NV40, "NVidia 0x0043"}, \
+	{0x10DE, 0x0045, NV40, "NVidia GeForce 6800 GT"}, \
+	{0x10DE, 0x0046, NV40, "NVidia GeForce 6800 GT"}, \
+	{0x10DE, 0x0049, NV40, "NVidia 0x0049"}, \
+	{0x10DE, 0x004E, NV40, "NVidia Quadro FX 4000"}, \
+	{0x10DE, 0x0090, NV40, "NVidia 0x0090"}, \
+	{0x10DE, 0x0091, NV40, "NVidia GeForce 7800 GTX"}, \
+	{0x10DE, 0x0092, NV40, "NVidia 0x0092"}, \
+	{0x10DE, 0x0093, NV40, "NVidia 0x0093"}, \
+	{0x10DE, 0x0094, NV40, "NVidia 0x0094"}, \
+	{0x10DE, 0x0098, NV40, "NVidia 0x0098"}, \
+	{0x10DE, 0x0099, NV40, "NVidia GeForce Go 7800 GTX"}, \
+	{0x10DE, 0x009C, NV40, "NVidia 0x009C"}, \
+	{0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \
+	{0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \
+	{0x10DE, 0x00A0, NV04, "NVidia Aladdin TNT2"}, \
+	{0x10DE, 0x00C0, NV40, "NVidia 0x00C0"}, \
+	{0x10DE, 0x00C1, NV40, "NVidia GeForce 6800"}, \
+	{0x10DE, 0x00C2, NV40, "NVidia GeForce 6800 LE"}, \
+	{0x10DE, 0x00C8, NV40, "NVidia GeForce Go 6800"}, \
+	{0x10DE, 0x00C9, NV40, "NVidia GeForce Go 6800 Ultra"}, \
+	{0x10DE, 0x00CC, NV40, "NVidia Quadro FX Go1400"}, \
+	{0x10DE, 0x00CD, NV40, "NVidia Quadro FX 3450/4000 SDI"}, \
+	{0x10DE, 0x00CE, NV40, "NVidia Quadro FX 1400"}, \
+	{0x10DE, 0x00F0, NV40, "Nvidia GeForce 6600 GT"}, \
+	{0x10DE, 0x00F1, NV40, "Nvidia GeForce 6600 GT"}, \
+	{0x10DE, 0x0100, NV10, "NVidia GeForce 256"}, \
+	{0x10DE, 0x0101, NV10, "NVidia GeForce DDR"}, \
+	{0x10DE, 0x0103, NV10, "NVidia Quadro"}, \
+	{0x10DE, 0x0110, NV10, "NVidia GeForce2 MX/MX 400"}, \
+	{0x10DE, 0x0111, NV10, "NVidia GeForce2 MX 100/200"}, \
+	{0x10DE, 0x0112, NV10, "NVidia GeForce2 Go"}, \
+	{0x10DE, 0x0113, NV10, "NVidia Quadro2 MXR/EX/Go"}, \
+	{0x10DE, 0x0140, NV40, "NVidia GeForce 6600 GT"}, \
+	{0x10DE, 0x0141, NV40, "NVidia GeForce 6600"}, \
+	{0x10DE, 0x0142, NV40, "NVidia GeForce 6600 LE"}, \
+	{0x10DE, 0x0143, NV40, "NVidia 0x0143"}, \
+	{0x10DE, 0x0144, NV40, "NVidia GeForce Go 6600"}, \
+	{0x10DE, 0x0145, NV40, "NVidia GeForce 6610 XL"}, \
+	{0x10DE, 0x0146, NV40, "NVidia GeForce Go 6600 TE/6200 TE"}, \
+	{0x10DE, 0x0147, NV40, "NVidia GeForce 6700 XL"}, \
+	{0x10DE, 0x0148, NV40, "NVidia GeForce Go 6600"}, \
+	{0x10DE, 0x0149, NV40, "NVidia GeForce Go 6600 GT"}, \
+	{0x10DE, 0x014B, NV40, "NVidia 0x014B"}, \
+	{0x10DE, 0x014C, NV40, "NVidia 0x014C"}, \
+	{0x10DE, 0x014D, NV40, "NVidia 0x014D"}, \
+	{0x10DE, 0x014E, NV40, "NVidia Quadro FX 540"}, \
+	{0x10DE, 0x014F, NV40, "NVidia GeForce 6200"}, \
+	{0x10DE, 0x0150, NV10, "NVidia GeForce2 GTS"}, \
+	{0x10DE, 0x0151, NV10, "NVidia GeForce2 Ti"}, \
+	{0x10DE, 0x0152, NV10, "NVidia GeForce2 Ultra"}, \
+	{0x10DE, 0x0153, NV10, "NVidia Quadro2 Pro"}, \
+	{0x10DE, 0x0160, NV40, "NVidia 0x0160"}, \
+	{0x10DE, 0x0161, NV40, "NVidia GeForce 6200 TurboCache(TM)"}, \
+	{0x10DE, 0x0162, NV40, "NVidia GeForce 6200SE TurboCache(TM)"}, \
+	{0x10DE, 0x0163, NV40, "NVidia 0x0163"}, \
+	{0x10DE, 0x0164, NV40, "NVidia GeForce Go 6200"}, \
+	{0x10DE, 0x0165, NV40, "NVidia Quadro NVS 285"}, \
+	{0x10DE, 0x0166, NV40, "NVidia GeForce Go 6400"}, \
+	{0x10DE, 0x0167, NV40, "NVidia GeForce Go 6200"}, \
+	{0x10DE, 0x0168, NV40, "NVidia GeForce Go 6400"}, \
+	{0x10DE, 0x0169, NV40, "NVidia 0x0169"}, \
+	{0x10DE, 0x016B, NV40, "NVidia 0x016B"}, \
+	{0x10DE, 0x016C, NV40, "NVidia 0x016C"}, \
+	{0x10DE, 0x016D, NV40, "NVidia 0x016D"}, \
+	{0x10DE, 0x016E, NV40, "NVidia 0x016E"}, \
+	{0x10DE, 0x0170, NV10, "NVidia GeForce4 MX 460"}, \
+	{0x10DE, 0x0171, NV10, "NVidia GeForce4 MX 440"}, \
+	{0x10DE, 0x0172, NV10, "NVidia GeForce4 MX 420"}, \
+	{0x10DE, 0x0173, NV10, "NVidia GeForce4 MX 440-SE"}, \
+	{0x10DE, 0x0174, NV10, "NVidia GeForce4 440 Go"}, \
+	{0x10DE, 0x0175, NV10, "NVidia GeForce4 420 Go"}, \
+	{0x10DE, 0x0176, NV10, "NVidia GeForce4 420 Go 32M"}, \
+	{0x10DE, 0x0177, NV10, "NVidia GeForce4 460 Go"}, \
+	{0x10DE, 0x0178, NV10, "NVidia Quadro4 550 XGL"}, \
+	{0x10DE, 0x0179, NV10, "NVidia GeForce4"}, \
+	{0x10DE, 0x017A, NV10, "NVidia Quadro4 NVS"}, \
+	{0x10DE, 0x017C, NV10, "NVidia Quadro4 500 GoGL"}, \
+	{0x10DE, 0x017D, NV10, "NVidia GeForce4 410 Go 16M"}, \
+	{0x10DE, 0x0181, NV10, "NVidia GeForce4 MX 440 with AGP8X"}, \
+	{0x10DE, 0x0182, NV10, "NVidia GeForce4 MX 440SE with AGP8X"}, \
+	{0x10DE, 0x0183, NV10, "NVidia GeForce4 MX 420 with AGP8X"}, \
+	{0x10DE, 0x0185, NV10, "NVidia GeForce4 MX 4000"}, \
+	{0x10DE, 0x0186, NV10, "NVidia GeForce4 448 Go"}, \
+	{0x10DE, 0x0187, NV10, "NVidia GeForce4 488 Go"}, \
+	{0x10DE, 0x0188, NV10, "NVidia Quadro4 580 XGL"}, \
+	{0x10DE, 0x0189, NV10, "NVidia GeForce4 MX with AGP8X (Mac)"}, \
+	{0x10DE, 0x018A, NV10, "NVidia Quadro4 280 NVS"}, \
+	{0x10DE, 0x018B, NV10, "NVidia Quadro4 380 XGL"}, \
+	{0x10DE, 0x018C, NV10, "NVidia Quadro NVS 50 PCI"}, \
+	{0x10DE, 0x018D, NV10, "NVidia GeForce4 448 Go"}, \
+	{0x10DE, 0x01A0, NV10, "NVidia GeForce2 Integrated GPU"}, \
+	{0x10DE, 0x01F0, NV10, "NVidia GeForce4 MX Integrated GPU"}, \
+	{0x10DE, 0x0200, NV20, "NVidia GeForce3"}, \
+	{0x10DE, 0x0201, NV20, "NVidia GeForce3 Ti 200"}, \
+	{0x10DE, 0x0202, NV20, "NVidia GeForce3 Ti 500"}, \
+	{0x10DE, 0x0203, NV20, "NVidia Quadro DCC"}, \
+	{0x10DE, 0x0210, NV40, "NVidia 0x0210"}, \
+	{0x10DE, 0x0211, NV40, "NVidia GeForce 6800"}, \
+	{0x10DE, 0x0212, NV40, "NVidia GeForce 6800 LE"}, \
+	{0x10DE, 0x0215, NV40, "NVidia GeForce 6800 GT"}, \
+	{0x10DE, 0x0220, NV40, "NVidia 0x0220"}, \
+	{0x10DE, 0x0221, NV40, "NVidia GeForce 6200"}, \
+	{0x10DE, 0x0222, NV40, "NVidia 0x0222"}, \
+	{0x10DE, 0x0228, NV40, "NVidia 0x0228"}, \
+	{0x10DE, 0x0250, NV20, "NVidia GeForce4 Ti 4600"}, \
+	{0x10DE, 0x0251, NV20, "NVidia GeForce4 Ti 4400"}, \
+	{0x10DE, 0x0252, NV20, "NVidia 0x0252"}, \
+	{0x10DE, 0x0253, NV20, "NVidia GeForce4 Ti 4200"}, \
+	{0x10DE, 0x0258, NV20, "NVidia Quadro4 900 XGL"}, \
+	{0x10DE, 0x0259, NV20, "NVidia Quadro4 750 XGL"}, \
+	{0x10DE, 0x025B, NV20, "NVidia Quadro4 700 XGL"}, \
+	{0x10DE, 0x0280, NV20, "NVidia GeForce4 Ti 4800"}, \
+	{0x10DE, 0x0281, NV20, "NVidia GeForce4 Ti 4200 with AGP8X"}, \
+	{0x10DE, 0x0282, NV20, "NVidia GeForce4 Ti 4800 SE"}, \
+	{0x10DE, 0x0286, NV20, "NVidia GeForce4 4200 Go"}, \
+	{0x10DE, 0x0288, NV20, "NVidia Quadro4 980 XGL"}, \
+	{0x10DE, 0x0289, NV20, "NVidia Quadro4 780 XGL"}, \
+	{0x10DE, 0x028C, NV20, "NVidia Quadro4 700 GoGL"}, \
+	{0x10DE, 0x0301, NV30, "NVidia GeForce FX 5800 Ultra"}, \
+	{0x10DE, 0x0302, NV30, "NVidia GeForce FX 5800"}, \
+	{0x10DE, 0x0308, NV30, "NVidia Quadro FX 2000"}, \
+	{0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \
+	{0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \
+	{0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \
+	{0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \
+	{0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \
+	{0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \
+	{0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \
+	{0x10DE, 0x031A, NV30, "NVidia GeForce FX Go5600"}, \
+	{0x10DE, 0x031B, NV30, "NVidia GeForce FX Go5650"}, \
+	{0x10DE, 0x031C, NV30, "NVidia Quadro FX Go700"}, \
+	{0x10DE, 0x031D, NV30, "NVidia 0x031D"}, \
+	{0x10DE, 0x031E, NV30, "NVidia 0x031E"}, \
+	{0x10DE, 0x031F, NV30, "NVidia 0x031F"}, \
+	{0x10DE, 0x0320, NV30, "NVidia GeForce FX 5200"}, \
+	{0x10DE, 0x0321, NV30, "NVidia GeForce FX 5200 Ultra"}, \
+	{0x10DE, 0x0322, NV30, "NVidia GeForce FX 5200"}, \
+	{0x10DE, 0x0323, NV30, "NVidia GeForce FX 5200SE"}, \
+	{0x10DE, 0x0324, NV30, "NVidia GeForce FX Go5200"}, \
+	{0x10DE, 0x0325, NV30, "NVidia GeForce FX Go5250"}, \
+	{0x10DE, 0x0326, NV30, "NVidia GeForce FX 5500"}, \
+	{0x10DE, 0x0327, NV30, "NVidia GeForce FX 5100"}, \
+	{0x10DE, 0x0328, NV30, "NVidia GeForce FX Go5200 32M/64M"}, \
+	{0x10DE, 0x0329, NV30, "NVidia GeForce FX 5200 (Mac)"}, \
+	{0x10DE, 0x032A, NV30, "NVidia Quadro NVS 280 PCI"}, \
+	{0x10DE, 0x032B, NV30, "NVidia Quadro FX 500/600 PCI"}, \
+	{0x10DE, 0x032C, NV30, "NVidia GeForce FX Go53xx Series"}, \
+	{0x10DE, 0x032D, NV30, "NVidia GeForce FX Go5100"}, \
+	{0x10DE, 0x032F, NV30, "NVidia 0x032F"}, \
+	{0x10DE, 0x0330, NV30, "NVidia GeForce FX 5900 Ultra"}, \
+	{0x10DE, 0x0331, NV30, "NVidia GeForce FX 5900"}, \
+	{0x10DE, 0x0332, NV30, "NVidia GeForce FX 5900XT"}, \
+	{0x10DE, 0x0333, NV30, "NVidia GeForce FX 5950 Ultra"}, \
+	{0x10DE, 0x0334, NV30, "NVidia GeForce FX 5900ZT"}, \
+	{0x10DE, 0x0338, NV30, "NVidia Quadro FX 3000"}, \
+	{0x10DE, 0x033F, NV30, "NVidia Quadro FX 700"}, \
+	{0x10DE, 0x0341, NV30, "NVidia GeForce FX 5700 Ultra"}, \
+	{0x10DE, 0x0342, NV30, "NVidia GeForce FX 5700"}, \
+	{0x10DE, 0x0343, NV30, "NVidia GeForce FX 5700LE"}, \
+	{0x10DE, 0x0344, NV30, "NVidia GeForce FX 5700VE"}, \
+	{0x10DE, 0x0345, NV30, "NVidia 0x0345"}, \
+	{0x10DE, 0x0347, NV30, "NVidia GeForce FX Go5700"}, \
+	{0x10DE, 0x0348, NV30, "NVidia GeForce FX Go5700"}, \
+	{0x10DE, 0x0349, NV30, "NVidia 0x0349"}, \
+	{0x10DE, 0x034B, NV30, "NVidia 0x034B"}, \
+	{0x10DE, 0x034C, NV30, "NVidia Quadro FX Go1000"}, \
+	{0x10DE, 0x034E, NV30, "NVidia Quadro FX 1100"}, \
+	{0x10DE, 0x034F, NV30, "NVidia 0x034F"}, \
+	{0, 0, 0, NULL}
+
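Each of these tables expands to a sentinel-terminated initializer list of {vendor, device, flags, name} entries. A minimal sketch of the entry shape the initializers assume (the real typedef lives in drmP.h; the field names here are illustrative):

	typedef struct drm_pci_id_list {
		int	vendor;		/* PCI vendor ID, e.g. 0x10DE for NVidia */
		int	device;		/* PCI device ID */
		long	driver_private;	/* per-driver chip flags, e.g. NV40 */
		char	*name;		/* human-readable description */
	} drm_pci_id_list_t;

The closing {0, 0, 0, NULL} entry terminates each table, so consumers can walk it without carrying a separate length.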
+#define r128_PCI_IDS \
+	{0x1002, 0x4C45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
+	{0x1002, 0x4C46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
+	{0x1002, 0x4D46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
+	{0x1002, 0x4D4C, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
+	{0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
+	{0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
+	{0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
+	{0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
+	{0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
+	{0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
+	{0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
+	{0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
+	{0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
+	{0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
+	{0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
+	{0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
+	{0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
+	{0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
+	{0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
+	{0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
+	{0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
+	{0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
+	{0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
+	{0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
+	{0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
+	{0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
+	{0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
+	{0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
+	{0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
+	{0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
+	{0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
+	{0x1002, 0x524B, 0, "ATI Rage 128 RK (PCI)"}, \
+	{0x1002, 0x524C, 0, "ATI Rage 128 RL (AGP)"}, \
+	{0x1002, 0x534D, 0, "ATI Rage 128 SM (AGP)"}, \
+	{0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
+	{0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
+	{0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
+	{0, 0, 0, NULL}
+
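Matching a probed device against one of these tables is a linear scan that stops at the sentinel. A sketch of that lookup, assuming the drm_pci_id_list_t layout above (the function name is illustrative, not the tree's own helper):

	static const char *
	drm_pciid_lookup(const drm_pci_id_list_t *list, int vendor, int device)
	{
		int i;

		/* vendor == 0 only occurs in the {0, 0, 0, NULL} terminator. */
		for (i = 0; list[i].vendor != 0; i++) {
			if (list[i].vendor == vendor && list[i].device == device)
				return (list[i].name);
		}
		return (NULL);
	}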
 #define radeon_PCI_IDS \
 	{0x1002, 0x3150, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \
+	{0x1002, 0x3151, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "M24 [FireMV 2400]"}, \
 	{0x1002, 0x3152, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \
 	{0x1002, 0x3154, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FireGL M24 GL"}, \
+	{0x1002, 0x3155, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV380 [FireMV 2400]"}, \
 	{0x1002, 0x3E50, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \
 	{0x1002, 0x3E54, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \
 	{0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, "ATI Radeon RS100 IGP 320"}, \
@@ -47,6 +388,7 @@
 	{0x1002, 0x4A4F, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \
 	{0x1002, 0x4A50, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \
 	{0x1002, 0x4A54, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \
+	{0x1002, 0x4B48, CHIP_R420|RADEON_NEW_MEMMAP, "R481 [Radeon X850 PCIe]"}, \
 	{0x1002, 0x4B49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \
 	{0x1002, 0x4B4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \
 	{0x1002, 0x4B4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \
@@ -58,6 +400,7 @@
 	{0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
 	{0x1002, 0x4C66, CHIP_RV250, "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"}, \
 	{0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 9000 M9"}, \
+	{0x1002, 0x4C6E, CHIP_RV280|RADEON_IS_MOBILITY, "Radeon RV250 Ln [Radeon Mobility 9000 M9] (Secondary)"}, \
 	{0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
 	{0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro / 9700"}, \
 	{0x1002, 0x4E46, CHIP_R300, "ATI Radeon NF R300 9600TX"}, \
@@ -109,8 +452,6 @@
 	{0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \
 	{0x1002, 0x5954, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI RS480 XPRESS 200G"}, \
 	{0x1002, 0x5955, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5955"}, \
-	{0x1002, 0x5974, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS482 XPRESS 200"}, \
-	{0x1002, 0x5975, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS485 XPRESS 1100 IGP"}, \
 	{0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9250"}, \
 	{0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200"}, \
 	{0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
@@ -117,33 +458,196 @@
 	{0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
 	{0x1002, 0x5965, CHIP_RV280, "ATI FireMV 2200 PCI"}, \
 	{0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
-	{0x1002, 0x5a41, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200 5A41 (PCIE)"}, \
-	{0x1002, 0x5a42, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5A42 (PCIE)"}, \
-	{0x1002, 0x5a61, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200"}, \
-	{0x1002, 0x5a62, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200M"}, \
-	{0x1002, 0x5b60, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
-	{0x1002, 0x5b62, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
-	{0x1002, 0x5b63, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
-	{0x1002, 0x5b64, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
-	{0x1002, 0x5b65, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
-	{0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
-	{0x1002, 0x5c63, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
-	{0x1002, 0x5d48, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
-	{0x1002, 0x5d49, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
-	{0x1002, 0x5d4a, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
-	{0x1002, 0x5d4c, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
-	{0x1002, 0x5d4d, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
-	{0x1002, 0x5d4e, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
-	{0x1002, 0x5d4f, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
-	{0x1002, 0x5d50, CHIP_R423|RADEON_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
-	{0x1002, 0x5d52, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
-	{0x1002, 0x5d57, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
-	{0x1002, 0x5e48, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
-	{0x1002, 0x5e4a, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
-	{0x1002, 0x5e4b, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
-	{0x1002, 0x5e4c, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
-	{0x1002, 0x5e4d, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
-	{0x1002, 0x5e4f, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
+	{0x1002, 0x5974, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS482 XPRESS 200"}, \
+	{0x1002, 0x5975, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS485 XPRESS 1100 IGP"}, \
+	{0x1002, 0x5A41, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200 5A41 (PCIE)"}, \
+	{0x1002, 0x5A42, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5A42 (PCIE)"}, \
+	{0x1002, 0x5A61, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200"}, \
+	{0x1002, 0x5A62, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200M"}, \
+	{0x1002, 0x5B60, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
+	{0x1002, 0x5B62, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
+	{0x1002, 0x5B63, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
+	{0x1002, 0x5B64, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
+	{0x1002, 0x5B65, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
+	{0x1002, 0x5C61, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
+	{0x1002, 0x5C63, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
+	{0x1002, 0x5D48, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
+	{0x1002, 0x5D49, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
+	{0x1002, 0x5D4A, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
+	{0x1002, 0x5D4C, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
+	{0x1002, 0x5D4D, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
+	{0x1002, 0x5D4E, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
+	{0x1002, 0x5D4F, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
+	{0x1002, 0x5D50, CHIP_R423|RADEON_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
+	{0x1002, 0x5D52, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
+	{0x1002, 0x5D57, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
+	{0x1002, 0x5E48, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
+	{0x1002, 0x5E4A, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
+	{0x1002, 0x5E4B, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
+	{0x1002, 0x5E4C, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
+	{0x1002, 0x5E4D, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
+	{0x1002, 0x5E4F, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
+	{0x1002, 0x6700, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6701, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6702, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6703, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6704, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman PRO GL [FirePro V7900]"}, \
+	{0x1002, 0x6705, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL PRO [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6706, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6707, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman LE GL [FirePro V5900]"}, \
+	{0x1002, 0x6708, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6709, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6718, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman XT [Radeon HD 6970]"}, \
+	{0x1002, 0x6719, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman PRO [Radeon HD 6950]"}, \
+	{0x1002, 0x671C, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Antilles [Radeon HD 6990]"}, \
+	{0x1002, 0x671D, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Antilles [AMD Radeon HD 6990]"}, \
+	{0x1002, 0x671F, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman [Radeon HD 6900 Series]"}, \
+	{0x1002, 0x6720, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Radeon HD 6900M series]"}, \
+	{0x1002, 0x6721, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6722, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6723, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6724, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6725, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6726, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6727, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6728, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6729, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6738, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts XT [Radeon HD 6800 Series]"}, \
+	{0x1002, 0x6739, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts PRO [Radeon HD 6800 Series]"}, \
+	{0x1002, 0x673E, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts LE [AMD Radeon HD 6700 Series]"}, \
+	{0x1002, 0x6740, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler XT [AMD Radeon HD 6700M Series]"}, \
+	{0x1002, 0x6741, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [AMD Radeon HD 6600M Series]"}, \
+	{0x1002, 0x6742, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler LE [AMD Radeon HD 6625M Graphics]"}, \
+	{0x1002, 0x6743, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [Radeon E6760]"}, \
+	{0x1002, 0x6744, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [ATI Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6745, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler"}, \
+	{0x1002, 0x6746, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6747, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6748, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6749, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [FirePro V4900]"}, \
+	{0x1002, 0x674A, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [AMD FirePro V3900]"}, \
+	{0x1002, 0x6750, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [AMD Radeon HD 6570]"}, \
+	{0x1002, 0x6751, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 7600A Series]"}, \
+	{0x1002, 0x6758, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 6670]"}, \
+	{0x1002, 0x6759, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 6570]"}, \
+	{0x1002, 0x675B, CHIP_TURKS|RADEON_NEW_MEMMAP, "Unknown device name"}, \
+	{0x1002, 0x675D, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 7500 Series]"}, \
+	{0x1002, 0x675F, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks LE [Radeon HD 5500/7510 Series]"}, \
+	{0x1002, 0x6760, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6400M/7400M Series]"}, \
+	{0x1002, 0x6761, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour LP [Radeon HD 6430M]"}, \
+	{0x1002, 0x6762, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6763, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Seymour [Radeon E6460]"}, \
+	{0x1002, 0x6764, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour [Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6765, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour [Mobility Radeon HD 6000 series]"}, \
+	{0x1002, 0x6766, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \
+	{0x1002, 0x6767, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \
+	{0x1002, 0x6768, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \
+	{0x1002, 0x6770, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6400 Series]"}, \
+	{0x1002, 0x6771, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \
+	{0x1002, 0x6772, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7400A Series]"}, \
+	{0x1002, 0x6778, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7000 Series]"}, \
+	{0x1002, 0x6779, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6450]"}, \
+	{0x1002, 0x677B, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7400 Series]"}, \
+	{0x1002, 0x6780, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6784, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6788, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x678A, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6790, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \
+	{0x1002, 0x6791, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \
+	{0x1002, 0x6792, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \
+	{0x1002, 0x6798, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti XT [Radeon HD 7970]"}, \
+	{0x1002, 0x6799, CHIP_TAHITI|RADEON_NEW_MEMMAP, "New Zealand [Radeon HD 7990]"}, \
+	{0x1002, 0x679A, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti PRO [Radeon HD 7950]"}, \
+	{0x1002, 0x679B, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [Radeon HD 7900 Series]"}, \
+	{0x1002, 0x679E, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti LE [Radeon HD 7800 Series]"}, \
+	{0x1002, 0x679F, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \
+	{0x1002, 0x6800, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon XT [Radeon HD 7970M]"}, \
+	{0x1002, 0x6801, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon"}, \
+	{0x1002, 0x6802, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon"}, \
+	{0x1002, 0x6806, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \
+	{0x1002, 0x6808, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6809, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6810, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \
+	{0x1002, 0x6811, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \
+	{0x1002, 0x6816, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \
+	{0x1002, 0x6817, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \
+	{0x1002, 0x6818, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [Radeon HD 7800]"}, \
+	{0x1002, 0x6819, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn PRO [Radeon HD 7800]"}, \
+	{0x1002, 0x6820, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
+	{0x1002, 0x6821, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
+	{0x1002, 0x6822, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Venus PRO [Radeon E8860]"}, \
+	{0x1002, 0x6823, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
+	{0x1002, 0x6824, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Chelsea [Radeon HD 7700M Series]"}, \
+	{0x1002, 0x6825, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
+	{0x1002, 0x6826, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Chelsea [Radeon HD 7700M Series]"}, \
+	{0x1002, 0x6827, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
+	{0x1002, 0x6828, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
+	{0x1002, 0x6829, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
+	{0x1002, 0x682A, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Venus PRO"}, \
+	{0x1002, 0x682B, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
+	{0x1002, 0x682D, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Unknown device name"}, \
+	{0x1002, 0x682F, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700M Series]"}, \
+	{0x1002, 0x6830, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
+	{0x1002, 0x6831, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [AMD Radeon HD 7700M Series]"}, \
+	{0x1002, 0x6835, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde PRX [Radeon R9 255 OEM]"}, \
+	{0x1002, 0x6837, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde LE [Radeon HD 7700 Series]"}, \
+	{0x1002, 0x6838, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
+	{0x1002, 0x6839, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
+	{0x1002, 0x683B, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700 Series]"}, \
+	{0x1002, 0x683D, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700 Series]"}, \
+	{0x1002, 0x683F, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde PRO [Radeon HD 7700 Series]"}, \
+	{0x1002, 0x6840, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames XT/GL [Radeon HD 7600M Series]"}, \
+	{0x1002, 0x6841, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames [Radeon 7500M/7600M Series]"}, \
+	{0x1002, 0x6842, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames LE [Radeon HD 7000M Series]"}, \
+	{0x1002, 0x6843, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames [Radeon HD 7670M]"}, \
+	{0x1002, 0x6849, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok [AMD Radeon HD 7400 Series]"}, \
+	{0x1002, 0x684C, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V (FireGL V) Graphics Adapter]"}, \
+	{0x1002, 0x6850, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok GL AIO [Radeon HD 7570]"}, \
+	{0x1002, 0x6858, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok [Radeon HD 7400 series]"}, \
+	{0x1002, 0x6859, CHIP_TURKS|RADEON_NEW_MEMMAP, "Unknown device name"}, \
+	{0x1002, 0x6880, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cypress"}, \
+	{0x1002, 0x6888, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [FirePro 3D V8800]"}, \
+	{0x1002, 0x6889, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [FirePro V7800]"}, \
+	{0x1002, 0x688A, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress XT [FirePro 3D V9800]"}, \
+	{0x1002, 0x688C, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [AMD FireStream 9370]"}, \
+	{0x1002, 0x688D, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [AMD FireStream 9350]"}, \
+	{0x1002, 0x6898, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress XT [Radeon HD 5870]"}, \
+	{0x1002, 0x6899, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress PRO [Radeon HD 5800 Series]"}, \
+	{0x1002, 0x689B, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [Radeon HD 6800 Series]"}, \
+	{0x1002, 0x689C, CHIP_HEMLOCK|RADEON_NEW_MEMMAP, "Hemlock [Radeon HD 5900 Series]"}, \
+	{0x1002, 0x689D, CHIP_HEMLOCK|RADEON_NEW_MEMMAP, "Hemlock [ATI Radeon HD 5900 Series]"}, \
+	{0x1002, 0x689E, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress LE [Radeon HD 5800 Series]"}, \
+	{0x1002, 0x68A0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 5870"}, \
+	{0x1002, 0x68A1, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Broadway PRO [Mobility Radeon HD 5800 Series]"}, \
+	{0x1002, 0x68A8, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Broadway [ATI Mobility Radeon HD 6800 Series]"}, \
+	{0x1002, 0x68A9, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper XT [FirePro 3D V5800]"}, \
+	{0x1002, 0x68B0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Unknown device name"}, \
+	{0x1002, 0x68B8, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5700 Series]"}, \
+	{0x1002, 0x68B9, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5600/5700]"}, \
+	{0x1002, 0x68BA, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper XT [AMD Radeon HD 6000 Series]"}, \
+	{0x1002, 0x68BE, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5700 Series]"}, \
+	{0x1002, 0x68BF, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper LE [Radeon HD 6700 Series]"}, \
+	{0x1002, 0x68C0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Madison [Mobility Radeon HD 5000 Series]"}, \
+	{0x1002, 0x68C1, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Madison [Radeon HD 5000M Series]"}, \
+	{0x1002, 0x68C7, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Pinewood [Radeon HD 5570]"}, \
+	{0x1002, 0x68C8, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "FirePro V4800"}, \
+	{0x1002, 0x68C9, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood [FirePro 3800 (FireGL)]"}, \
+	{0x1002, 0x68D8, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood [Radeon HD 5670]"}, \
+	{0x1002, 0x68D9, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood PRO [Radeon HD 5500 Series]"}, \
+	{0x1002, 0x68DA, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood PRO [Radeon HD 5500 Series]"}, \
+	{0x1002, 0x68DE, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood"}, \
+	{0x1002, 0x68E0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Manhattan [Mobility Radeon HD 5400 Series]"}, \
+	{0x1002, 0x68E1, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Manhattan [Mobility Radeon HD 5430 Series]"}, \
+	{0x1002, 0x68E4, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Robson CE [AMD Radeon HD 6300 Series]"}, \
+	{0x1002, 0x68E5, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Robson LE [AMD Radeon HD 6300M Series]"}, \
+	{0x1002, 0x68E8, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar"}, \
+	{0x1002, 0x68E9, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [ATI FirePro (FireGL) Graphics Adapter]"}, \
+	{0x1002, 0x68F1, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [FirePro 2460]"}, \
+	{0x1002, 0x68F2, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [FirePro 2270]"}, \
+	{0x1002, 0x68F8, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [Radeon HD 7300 Series]"}, \
+	{0x1002, 0x68F9, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar PRO [Radeon HD 5450/6350]"}, \
+	{0x1002, 0x68FA, CHIP_CEDAR|RADEON_NEW_MEMMAP, "EG Cedar [Radeon HD 7300 Series]"}, \
+	{0x1002, 0x68FE, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar LE"}, \
 	{0x1002, 0x7100, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
 	{0x1002, 0x7101, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800 XT"}, \
 	{0x1002, 0x7102, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800"}, \
@@ -241,15 +745,15 @@
 	{0x1002, 0x7297, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
 	{0x1002, 0x7834, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \
 	{0x1002, 0x7835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \
-	{0x1002, 0x793f, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
+	{0x1002, 0x791E, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1250 IGP"}, \
+	{0x1002, 0x791F, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1270 IGP"}, \
+	{0x1002, 0x793F, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
 	{0x1002, 0x7941, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
 	{0x1002, 0x7942, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
-	{0x1002, 0x791e, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1250 IGP"}, \
-	{0x1002, 0x791f, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1270 IGP"}, \
-	{0x1002, 0x796c, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
-	{0x1002, 0x796d, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
-	{0x1002, 0x796e, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
-	{0x1002, 0x796f, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+	{0x1002, 0x796C, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+	{0x1002, 0x796D, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+	{0x1002, 0x796E, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+	{0x1002, 0x796F, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
 	{0x1002, 0x9400, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
 	{0x1002, 0x9401, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
 	{0x1002, 0x9402, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
@@ -258,6 +762,41 @@
 	{0x1002, 0x940A, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8650"}, \
 	{0x1002, 0x940B, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8600"}, \
 	{0x1002, 0x940F, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V7600"}, \
+	{0x1002, 0x9440, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+	{0x1002, 0x9441, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4870 X2"}, \
+	{0x1002, 0x9442, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+	{0x1002, 0x9443, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4850 X2"}, \
+	{0x1002, 0x9444, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8750 (FireGL)"}, \
+	{0x1002, 0x9446, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V7760 (FireGL)"}, \
+	{0x1002, 0x944A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \
+	{0x1002, 0x944B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850 X2"}, \
+	{0x1002, 0x944C, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+	{0x1002, 0x944E, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro RV770"}, \
+	{0x1002, 0x9450, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9270"}, \
+	{0x1002, 0x9452, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9250"}, \
+	{0x1002, 0x9456, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8700 (FireGL)"}, \
+	{0x1002, 0x945A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4870"}, \
+	{0x1002, 0x945B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon M98"}, \
+	{0x1002, 0x945E, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV770"}, \
+	{0x1002, 0x9460, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+	{0x1002, 0x9462, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+	{0x1002, 0x946A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7750"}, \
+	{0x1002, 0x946B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+	{0x1002, 0x947A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+	{0x1002, 0x947B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+	{0x1002, 0x9480, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4650"}, \
+	{0x1002, 0x9487, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
+	{0x1002, 0x9488, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4670"}, \
+	{0x1002, 0x9489, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \
+	{0x1002, 0x948A, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV730"}, \
+	{0x1002, 0x948F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
+	{0x1002, 0x9490, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4670"}, \
+	{0x1002, 0x9491, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E4600"}, \
+	{0x1002, 0x9495, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4600 Series"}, \
+	{0x1002, 0x9498, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4650"}, \
+	{0x1002, 0x949C, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V7750 (FireGL)"}, \
+	{0x1002, 0x949E, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V5700 (FireGL)"}, \
+	{0x1002, 0x949F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V3750 (FireGL)"}, \
 	{0x1002, 0x94A0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4830"}, \
 	{0x1002, 0x94A1, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \
 	{0x1002, 0x94A3, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7740"}, \
@@ -291,6 +830,16 @@
 	{0x1002, 0x9515, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3850"}, \
 	{0x1002, 0x9517, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3690"}, \
 	{0x1002, 0x9519, CHIP_RV670|RADEON_NEW_MEMMAP, "AMD Firestream 9170"}, \
+	{0x1002, 0x9540, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4550"}, \
+	{0x1002, 0x9541, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+	{0x1002, 0x9542, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+	{0x1002, 0x954E, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+	{0x1002, 0x954F, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4350"}, \
+	{0x1002, 0x9552, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4300 Series"}, \
+	{0x1002, 0x9553, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
+	{0x1002, 0x9555, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
+	{0x1002, 0x9557, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro RG220"}, \
+	{0x1002, 0x955F, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV710 [Mobility Radeon HD 4330]"}, \
 	{0x1002, 0x9580, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI RV630"}, \
 	{0x1002, 0x9581, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600"}, \
 	{0x1002, 0x9583, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600 XT"}, \
@@ -304,26 +853,26 @@
 	{0x1002, 0x958D, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI FireGL V3600"}, \
 	{0x1002, 0x958E, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 LE"}, \
 	{0x1002, 0x958F, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL Graphics Processor"}, \
+	{0x1002, 0x9590, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 Series"}, \
+	{0x1002, 0x9591, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3650"}, \
+	{0x1002, 0x9593, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3670"}, \
+	{0x1002, 0x9595, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5700"}, \
+	{0x1002, 0x9596, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3650 AGP"}, \
+	{0x1002, 0x9597, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 PRO"}, \
+	{0x1002, 0x9598, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 XT"}, \
+	{0x1002, 0x9599, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 PRO"}, \
+	{0x1002, 0x959B, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5725"}, \
 	{0x1002, 0x95C0, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3470"}, \
+	{0x1002, 0x95C2, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3430"}, \
+	{0x1002, 0x95C4, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3400 Series"}, \
 	{0x1002, 0x95C5, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
 	{0x1002, 0x95C6, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
 	{0x1002, 0x95C7, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3430"}, \
 	{0x1002, 0x95C9, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
-	{0x1002, 0x95C2, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3430"}, \
-	{0x1002, 0x95C4, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3400 Series"}, \
 	{0x1002, 0x95CC, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FirePro V3700"}, \
 	{0x1002, 0x95CD, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2450"}, \
 	{0x1002, 0x95CE, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \
 	{0x1002, 0x95CF, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \
-	{0x1002, 0x9590, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 Series"}, \
-	{0x1002, 0x9596, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3650 AGP"}, \
-	{0x1002, 0x9597, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 PRO"}, \
-	{0x1002, 0x9598, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 XT"}, \
-	{0x1002, 0x9599, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 PRO"}, \
-	{0x1002, 0x9591, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3650"}, \
-	{0x1002, 0x9593, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3670"}, \
-	{0x1002, 0x9595, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5700"}, \
-	{0x1002, 0x959B, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5725"}, \
 	{0x1002, 0x9610, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \
 	{0x1002, 0x9611, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3100 Graphics"}, \
 	{0x1002, 0x9612, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \
@@ -331,6 +880,20 @@
 	{0x1002, 0x9614, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3300 Graphics"}, \
 	{0x1002, 0x9615, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3200 Graphics"}, \
 	{0x1002, 0x9616, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3000 Graphics"}, \
+	{0x1002, 0x9640, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6550D]"}, \
+	{0x1002, 0x9641, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Mobility Radeon HD 6620G]"}, \
+	{0x1002, 0x9642, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6370D]"}, \
+	{0x1002, 0x9643, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6380G]"}, \
+	{0x1002, 0x9644, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6410D]"}, \
+	{0x1002, 0x9645, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6410D]"}, \
+	{0x1002, 0x9647, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6520G]"}, \
+	{0x1002, 0x9648, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6480G]"}, \
+	{0x1002, 0x9649, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6480G]"}, \
+	{0x1002, 0x964A, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6530D]"}, \
+	{0x1002, 0x964B, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \
+	{0x1002, 0x964C, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \
+	{0x1002, 0x964E, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \
+	{0x1002, 0x964F, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \
 	{0x1002, 0x9710, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4200"}, \
 	{0x1002, 0x9711, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 4100"}, \
 	{0x1002, 0x9712, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon HD 4200"}, \
@@ -337,435 +900,113 @@
 	{0x1002, 0x9713, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon 4100"}, \
 	{0x1002, 0x9714, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI RS880"}, \
 	{0x1002, 0x9715, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4250"}, \
-	{0x1002, 0x9440, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
-	{0x1002, 0x9441, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4870 X2"}, \
-	{0x1002, 0x9442, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
-	{0x1002, 0x9443, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4850 X2"}, \
-	{0x1002, 0x944C, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
-	{0x1002, 0x9450, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9270"}, \
-	{0x1002, 0x9452, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9250"}, \
-	{0x1002, 0x9444, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8750 (FireGL)"}, \
-	{0x1002, 0x9446, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V7760 (FireGL)"}, \
-	{0x1002, 0x9456, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8700 (FireGL)"}, \
-	{0x1002, 0x944E, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro RV770"}, \
-	{0x1002, 0x944A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \
-	{0x1002, 0x944B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850 X2"}, \
-	{0x1002, 0x945A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4870"}, \
-	{0x1002, 0x945B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon M98"}, \
-	{0x1002, 0x9460, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
-	{0x1002, 0x9462, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
-	{0x1002, 0x946A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7750"}, \
-	{0x1002, 0x946B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
-	{0x1002, 0x947A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
-	{0x1002, 0x947B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
-	{0x1002, 0x9487, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
-	{0x1002, 0x948F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
-	{0x1002, 0x9490, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4670"}, \
-	{0x1002, 0x9495, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4600 Series"}, \
-	{0x1002, 0x9498, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4650"}, \
-	{0x1002, 0x9480, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4650"}, \
-	{0x1002, 0x9488, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4670"}, \
-	{0x1002, 0x9489, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \
-	{0x1002, 0x9491, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E4600"}, \
-	{0x1002, 0x949C, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V7750 (FireGL)"}, \
-	{0x1002, 0x949E, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V5700 (FireGL)"}, \
-	{0x1002, 0x949F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V3750 (FireGL)"}, \
-	{0x1002, 0x9540, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4550"}, \
-	{0x1002, 0x9541, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
-	{0x1002, 0x9542, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
-	{0x1002, 0x954E, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
-	{0x1002, 0x954F, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4350"}, \
-	{0x1002, 0x9552, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4300 Series"}, \
-	{0x1002, 0x9553, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
-	{0x1002, 0x9555, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
-	{0x1002, 0x9557, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro RG220"}, \
+	{0x1002, 0x9802, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6310]"}, \
+	{0x1002, 0x9803, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6310]"}, \
+	{0x1002, 0x9804, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6250]"}, \
+	{0x1002, 0x9805, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6250]"}, \
+	{0x1002, 0x9806, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6320]"}, \
+	{0x1002, 0x9807, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6290]"}, \
+	{0x1002, 0x9808, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7340]"}, \
+	{0x1002, 0x9809, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7310]"}, \
+	{0x1002, 0x980A, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7290]"}, \
+	{0x1002, 0x9900, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Unknown device name"}, \
+	{0x1002, 0x9901, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7660D]"}, \
+	{0x1002, 0x9903, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7640G]"}, \
+	{0x1002, 0x9904, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7560D]"}, \
+	{0x1002, 0x9905, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [FirePro A300 Series Graphics]"}, \
+	{0x1002, 0x9906, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [FirePro A300 Series Graphics]"}, \
+	{0x1002, 0x9907, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7620G]"}, \
+	{0x1002, 0x9908, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7600G]"}, \
+	{0x1002, 0x9909, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \
+	{0x1002, 0x990A, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \
+	{0x1002, 0x990B, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8650G]"}, \
+	{0x1002, 0x990C, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8670D]"}, \
+	{0x1002, 0x990D, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8550G]"}, \
+	{0x1002, 0x990E, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8570D]"}, \
+	{0x1002, 0x990F, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Unknown device name"}, \
+	{0x1002, 0x9910, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7660G]"}, \
+	{0x1002, 0x9913, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7640G]"}, \
+	{0x1002, 0x9917, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7620G]"}, \
+	{0x1002, 0x9918, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7600G]"}, \
+	{0x1002, 0x9919, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \
+	{0x1002, 0x9990, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7520G]"}, \
+	{0x1002, 0x9991, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7540D]"}, \
+	{0x1002, 0x9992, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \
+	{0x1002, 0x9993, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7480D]"}, \
+	{0x1002, 0x9994, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \
+	{0x1002, 0x9995, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8450G]"}, \
+	{0x1002, 0x9996, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8470D]"}, \
+	{0x1002, 0x9997, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8350G]"}, \
+	{0x1002, 0x9998, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8370D]"}, \
+	{0x1002, 0x9999, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8510G]"}, \
+	{0x1002, 0x999A, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8410G]"}, \
+	{0x1002, 0x999B, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8310G]"}, \
+	{0x1002, 0x999C, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland"}, \
+	{0x1002, 0x999D, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8550D]"}, \
+	{0x1002, 0x99A0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7520G]"}, \
+	{0x1002, 0x99A2, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \
+	{0x1002, 0x99A4, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \
 	{0, 0, 0, NULL}
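The third field of each radeon entry ORs a CHIP_* family code with feature bits such as RADEON_IS_MOBILITY, RADEON_IS_IGP, RADEON_IS_IGPGART and RADEON_NEW_MEMMAP, so one long carries both the chip family and its capabilities. A sketch of pulling the two apart, assuming the radeon convention that the family occupies the low bits:

	#define RADEON_FAMILY_MASK	0x0000ffff	/* assumed: low bits hold CHIP_* */

	static void
	radeon_describe_entry(const drm_pci_id_list_t *e)
	{
		int family = e->driver_private & RADEON_FAMILY_MASK;

		printf("%s: family %d%s%s\n", e->name, family,
		    (e->driver_private & RADEON_IS_MOBILITY) ? ", mobility" : "",
		    (e->driver_private & RADEON_IS_IGP) ? ", IGP" : "");
	}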
 
-#define r128_PCI_IDS \
-	{0x1002, 0x4c45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
-	{0x1002, 0x4c46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
-	{0x1002, 0x4d46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
-	{0x1002, 0x4d4c, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
-	{0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
-	{0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
-	{0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
-	{0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
-	{0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
-	{0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
-	{0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
-	{0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
-	{0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
-	{0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
-	{0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
-	{0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
-	{0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
-	{0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
-	{0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
-	{0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
-	{0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
-	{0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
-	{0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
-	{0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
-	{0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
-	{0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
-	{0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
-	{0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
-	{0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
-	{0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
-	{0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
-	{0x1002, 0x524b, 0, "ATI Rage 128 RK (PCI)"}, \
-	{0x1002, 0x524c, 0, "ATI Rage 128 RL (AGP)"}, \
-	{0x1002, 0x534d, 0, "ATI Rage 128 SM (AGP)"}, \
-	{0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
-	{0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
-	{0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
+#define savage_PCI_IDS \
+	{0x5333, 0x8A20, S3_SAVAGE3D, "Savage 3D"}, \
+	{0x5333, 0x8A21, S3_SAVAGE3D, "Savage 3D/MV"}, \
+	{0x5333, 0x8A22, S3_SAVAGE4, "Savage4"}, \
+	{0x5333, 0x8A23, S3_SAVAGE4, "Savage4"}, \
+	{0x5333, 0x8A25, S3_PROSAVAGE, "ProSavage PM133"}, \
+	{0x5333, 0x8A26, S3_PROSAVAGE, "ProSavage KM133"}, \
+	{0x5333, 0x8C10, S3_SAVAGE_MX, "Savage/MX-MV"}, \
+	{0x5333, 0x8C11, S3_SAVAGE_MX, "Savage/MX"}, \
+	{0x5333, 0x8C12, S3_SAVAGE_MX, "Savage/IX-MV"}, \
+	{0x5333, 0x8C13, S3_SAVAGE_MX, "Savage/IX"}, \
+	{0x5333, 0x8C22, S3_SUPERSAVAGE, "SuperSavage MX/128"}, \
+	{0x5333, 0x8C24, S3_SUPERSAVAGE, "SuperSavage MX/64"}, \
+	{0x5333, 0x8C26, S3_SUPERSAVAGE, "SuperSavage MX/64C"}, \
+	{0x5333, 0x8C2A, S3_SUPERSAVAGE, "SuperSavage IX/128 SDR"}, \
+	{0x5333, 0x8C2B, S3_SUPERSAVAGE, "SuperSavage IX/128 DDR"}, \
+	{0x5333, 0x8C2C, S3_SUPERSAVAGE, "SuperSavage IX/64 SDR"}, \
+	{0x5333, 0x8C2D, S3_SUPERSAVAGE, "SuperSavage IX/64 DDR"}, \
+	{0x5333, 0x8C2E, S3_SUPERSAVAGE, "SuperSavage IX/C SDR"}, \
+	{0x5333, 0x8C2F, S3_SUPERSAVAGE, "SuperSavage IX/C DDR"}, \
+	{0x5333, 0x8D01, S3_TWISTER, "ProSavage Twister PN133"}, \
+	{0x5333, 0x8D02, S3_TWISTER, "ProSavage Twister KN133"}, \
+	{0x5333, 0x8D03, S3_PROSAVAGEDDR, "ProSavage DDR"}, \
+	{0x5333, 0x8D04, S3_PROSAVAGEDDR, "ProSavage DDR-K"}, \
 	{0, 0, 0, NULL}
 
-#define mga_PCI_IDS \
-	{0x102b, 0x0520, MGA_CARD_TYPE_G200, "Matrox G200 (PCI)"}, \
-	{0x102b, 0x0521, MGA_CARD_TYPE_G200, "Matrox G200 (AGP)"}, \
-	{0x102b, 0x0525, MGA_CARD_TYPE_G400, "Matrox G400/G450 (AGP)"}, \
-	{0x102b, 0x2527, MGA_CARD_TYPE_G550, "Matrox G550 (AGP)"}, \
-	{0, 0, 0, NULL}
-
-#define mach64_PCI_IDS \
-	{0x1002, 0x4749, 0, "3D Rage Pro"}, \
-	{0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
-	{0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
-	{0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
-	{0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
-	{0x1002, 0x4c49, 0, "3D Rage LT Pro"}, \
-	{0x1002, 0x4c50, 0, "3D Rage LT Pro"}, \
-	{0x1002, 0x4c51, 0, "3D Rage LT Pro"}, \
-	{0x1002, 0x4c42, 0, "3D Rage LT Pro AGP-133"}, \
-	{0x1002, 0x4c44, 0, "3D Rage LT Pro AGP-66"}, \
-	{0x1002, 0x474c, 0, "Rage XC"}, \
-	{0x1002, 0x474f, 0, "Rage XL"}, \
-	{0x1002, 0x4752, 0, "Rage XL"}, \
-	{0x1002, 0x4753, 0, "Rage XC"}, \
-	{0x1002, 0x474d, 0, "Rage XL AGP 2X"}, \
-	{0x1002, 0x474e, 0, "Rage XC AGP"}, \
-	{0x1002, 0x4c52, 0, "Rage Mobility P/M"}, \
-	{0x1002, 0x4c53, 0, "Rage Mobility L"}, \
-	{0x1002, 0x4c4d, 0, "Rage Mobility P/M AGP 2X"}, \
-	{0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
-	{0, 0, 0, NULL}
-
 #define sis_PCI_IDS \
+	{0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \
+	{0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \
 	{0x1039, 0x0300, 0, "SiS 300/305"}, \
 	{0x1039, 0x5300, 0, "SiS 540"}, \
 	{0x1039, 0x6300, 0, "SiS 630"}, \
 	{0x1039, 0x6330, SIS_CHIP_315, "SiS 661"}, \
 	{0x1039, 0x7300, 0, "SiS 730"}, \
-	{0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \
-	{0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \
 	{0, 0, 0, NULL}
 
 #define tdfx_PCI_IDS \
-	{0x121a, 0x0003, 0, "3dfx Voodoo Banshee"}, \
-	{0x121a, 0x0004, 0, "3dfx Voodoo3 2000"}, \
-	{0x121a, 0x0005, 0, "3dfx Voodoo3 3000"}, \
-	{0x121a, 0x0007, 0, "3dfx Voodoo4 4500"}, \
-	{0x121a, 0x0009, 0, "3dfx Voodoo5 5500"}, \
-	{0x121a, 0x000b, 0, "3dfx Voodoo4 4200"}, \
+	{0x121A, 0x0003, 0, "3dfx Voodoo Banshee"}, \
+	{0x121A, 0x0004, 0, "3dfx Voodoo3 2000"}, \
+	{0x121A, 0x0005, 0, "3dfx Voodoo3 3000"}, \
+	{0x121A, 0x0007, 0, "3dfx Voodoo4 4500"}, \
+	{0x121A, 0x0009, 0, "3dfx Voodoo5 5500"}, \
+	{0x121A, 0x000B, 0, "3dfx Voodoo4 4200"}, \
 	{0, 0, 0, NULL}
 
 #define viadrv_PCI_IDS \
 	{0x1106, 0x3022, 0, "VIA CLE266 3022"}, \
+	{0x1106, 0x3108, 0, "VIA K8M800"}, \
 	{0x1106, 0x3118, VIA_PRO_GROUP_A, "VIA CN400 / PM8X0"}, \
 	{0x1106, 0x3122, 0, "VIA CLE266"}, \
-	{0x1106, 0x7205, 0, "VIA KM400"}, \
-	{0x1106, 0x3108, 0, "VIA K8M800"}, \
+	{0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \
+	{0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \
+	{0x1106, 0x3343, 0, "VIA P4M890"}, \
 	{0x1106, 0x3344, 0, "VIA CN700 / VM800 / P4M800Pro"}, \
-	{0x1106, 0x3343, 0, "VIA P4M890"}, \
-	{0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \
-	{0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \
 	{0x1106, 0x3371, VIA_DX9_0, "VIA P4M900 / VN896"}, \
+	{0x1106, 0x7205, 0, "VIA KM400"}, \
 	{0, 0, 0, NULL}
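Drivers consume these macros by expanding them into a static array at file scope, for example (a sketch; the identifier follows the usual drm naming convention but is illustrative here):

	static drm_pci_id_list_t viadrv_pciidlist[] = { viadrv_PCI_IDS };

Because each macro body is a backslash-continued initializer list ending in {0, 0, 0, NULL}, the expansion yields a complete, terminated match table that can be handed straight to a probe-time scan like the one sketched earlier.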
 
-#define i810_PCI_IDS \
-	{0x8086, 0x7121, 0, "Intel i810 GMCH"}, \
-	{0x8086, 0x7123, 0, "Intel i810-DC100 GMCH"}, \
-	{0x8086, 0x7125, 0, "Intel i810E GMCH"}, \
-	{0x8086, 0x1132, 0, "Intel i815 GMCH"}, \
-	{0, 0, 0, NULL}
-
-#define i830_PCI_IDS \
-	{0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
-	{0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
-	{0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
-	{0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
-	{0, 0, 0, NULL}
-
-#define gamma_PCI_IDS \
-	{0x3d3d, 0x0008, 0, "3DLabs GLINT Gamma G1"}, \
-	{0, 0, 0, NULL}
-
-#define savage_PCI_IDS \
-	{0x5333, 0x8a20, S3_SAVAGE3D, "Savage 3D"}, \
-	{0x5333, 0x8a21, S3_SAVAGE3D, "Savage 3D/MV"}, \
-	{0x5333, 0x8a22, S3_SAVAGE4, "Savage4"}, \
-	{0x5333, 0x8a23, S3_SAVAGE4, "Savage4"}, \
-	{0x5333, 0x8c10, S3_SAVAGE_MX, "Savage/MX-MV"}, \
-	{0x5333, 0x8c11, S3_SAVAGE_MX, "Savage/MX"}, \
-	{0x5333, 0x8c12, S3_SAVAGE_MX, "Savage/IX-MV"}, \
-	{0x5333, 0x8c13, S3_SAVAGE_MX, "Savage/IX"}, \
-	{0x5333, 0x8c22, S3_SUPERSAVAGE, "SuperSavage MX/128"}, \
-	{0x5333, 0x8c24, S3_SUPERSAVAGE, "SuperSavage MX/64"}, \
-	{0x5333, 0x8c26, S3_SUPERSAVAGE, "SuperSavage MX/64C"}, \
-	{0x5333, 0x8c2a, S3_SUPERSAVAGE, "SuperSavage IX/128 SDR"}, \
-	{0x5333, 0x8c2b, S3_SUPERSAVAGE, "SuperSavage IX/128 DDR"}, \
-	{0x5333, 0x8c2c, S3_SUPERSAVAGE, "SuperSavage IX/64 SDR"}, \
-	{0x5333, 0x8c2d, S3_SUPERSAVAGE, "SuperSavage IX/64 DDR"}, \
-	{0x5333, 0x8c2e, S3_SUPERSAVAGE, "SuperSavage IX/C SDR"}, \
-	{0x5333, 0x8c2f, S3_SUPERSAVAGE, "SuperSavage IX/C DDR"}, \
-	{0x5333, 0x8a25, S3_PROSAVAGE, "ProSavage PM133"}, \
-	{0x5333, 0x8a26, S3_PROSAVAGE, "ProSavage KM133"}, \
-	{0x5333, 0x8d01, S3_TWISTER, "ProSavage Twister PN133"}, \
-	{0x5333, 0x8d02, S3_TWISTER, "ProSavage Twister KN133"}, \
-	{0x5333, 0x8d03, S3_PROSAVAGEDDR, "ProSavage DDR"}, \
-	{0x5333, 0x8d04, S3_PROSAVAGEDDR, "ProSavage DDR-K"}, \
-	{0, 0, 0, NULL}
-
-#define ffb_PCI_IDS \
-	{0, 0, 0, NULL}
-
-#define i915_PCI_IDS \
-	{0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
-	{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
-	{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
-	{0x8086, 0x358e, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
-	{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
-	{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
-	{0x8086, 0x258a, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
-	{0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
-	{0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
-	{0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
-	{0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
-	{0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
-	{0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
-	{0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
-	{0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
-	{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
-	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
-	{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
-	{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
-	{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
-	{0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
-	{0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
-	{0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
-	{0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
-	{0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
-	{0x8086, 0x2e42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
-	{0x8086, 0x2e92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
-	{0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
-	{0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
-	{0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-	{0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-	{0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-	{0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-	{0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-	{0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-	{0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-	{0x8086, 0x0152, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
-	{0x8086, 0x0162, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
-	{0x8086, 0x0156, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
-	{0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
-	{0x8086, 0x015A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
-	{0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
-	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
-	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
-	{0, 0, 0, NULL}
-
-#define imagine_PCI_IDS \
-	{0x105d, 0x2309, IMAGINE_128, "Imagine 128"}, \
-	{0x105d, 0x2339, IMAGINE_128_2, "Imagine 128-II"}, \
-	{0x105d, 0x493d, IMAGINE_T2R, "Ticket to Ride"}, \
-	{0x105d, 0x5348, IMAGINE_REV4, "Revolution IV"}, \
-	{0, 0, 0, NULL}
-
-#define nv_PCI_IDS \
-	{0x10DE, 0x0020, NV04, "NVidia RIVA TNT"}, \
-	{0x10DE, 0x0028, NV04, "NVidia RIVA TNT2"}, \
-	{0x10DE, 0x002A, NV04, "NVidia Unknown TNT2"}, \
-	{0x10DE, 0x002C, NV04, "NVidia Vanta"}, \
-	{0x10DE, 0x0029, NV04, "NVidia RIVA TNT2 Ultra"}, \
-	{0x10DE, 0x002D, NV04, "NVidia RIVA TNT2 Model 64"}, \
-	{0x10DE, 0x00A0, NV04, "NVidia Aladdin TNT2"}, \
-	{0x10DE, 0x0100, NV10, "NVidia GeForce 256"}, \
-	{0x10DE, 0x0101, NV10, "NVidia GeForce DDR"}, \
-	{0x10DE, 0x0103, NV10, "NVidia Quadro"}, \
-	{0x10DE, 0x0110, NV10, "NVidia GeForce2 MX/MX 400"}, \
-	{0x10DE, 0x0111, NV10, "NVidia GeForce2 MX 100/200"}, \
-	{0x10DE, 0x0112, NV10, "NVidia GeForce2 Go"}, \
-	{0x10DE, 0x0113, NV10, "NVidia Quadro2 MXR/EX/Go"}, \
-	{0x10DE, 0x0150, NV10, "NVidia GeForce2 GTS"}, \
-	{0x10DE, 0x0151, NV10, "NVidia GeForce2 Ti"}, \
-	{0x10DE, 0x0152, NV10, "NVidia GeForce2 Ultra"}, \
-	{0x10DE, 0x0153, NV10, "NVidia Quadro2 Pro"}, \
-	{0x10DE, 0x0170, NV10, "NVidia GeForce4 MX 460"}, \
-	{0x10DE, 0x0171, NV10, "NVidia GeForce4 MX 440"}, \
-	{0x10DE, 0x0172, NV10, "NVidia GeForce4 MX 420"}, \
-	{0x10DE, 0x0173, NV10, "NVidia GeForce4 MX 440-SE"}, \
-	{0x10DE, 0x0174, NV10, "NVidia GeForce4 440 Go"}, \
-	{0x10DE, 0x0175, NV10, "NVidia GeForce4 420 Go"}, \
-	{0x10DE, 0x0176, NV10, "NVidia GeForce4 420 Go 32M"}, \
-	{0x10DE, 0x0177, NV10, "NVidia GeForce4 460 Go"}, \
-	{0x10DE, 0x0178, NV10, "NVidia Quadro4 550 XGL"}, \
-	{0x10DE, 0x0179, NV10, "NVidia GeForce4"}, \
-	{0x10DE, 0x017A, NV10, "NVidia Quadro4 NVS"}, \
-	{0x10DE, 0x017C, NV10, "NVidia Quadro4 500 GoGL"}, \
-	{0x10DE, 0x017D, NV10, "NVidia GeForce4 410 Go 16M"}, \
-	{0x10DE, 0x0181, NV10, "NVidia GeForce4 MX 440 with AGP8X"}, \
-	{0x10DE, 0x0182, NV10, "NVidia GeForce4 MX 440SE with AGP8X"}, \
-	{0x10DE, 0x0183, NV10, "NVidia GeForce4 MX 420 with AGP8X"}, \
-	{0x10DE, 0x0185, NV10, "NVidia GeForce4 MX 4000"}, \
-	{0x10DE, 0x0186, NV10, "NVidia GeForce4 448 Go"}, \
-	{0x10DE, 0x0187, NV10, "NVidia GeForce4 488 Go"}, \
-	{0x10DE, 0x0188, NV10, "NVidia Quadro4 580 XGL"}, \
-	{0x10DE, 0x0189, NV10, "NVidia GeForce4 MX with AGP8X (Mac)"}, \
-	{0x10DE, 0x018A, NV10, "NVidia Quadro4 280 NVS"}, \
-	{0x10DE, 0x018B, NV10, "NVidia Quadro4 380 XGL"}, \
-	{0x10DE, 0x018C, NV10, "NVidia Quadro NVS 50 PCI"}, \
-	{0x10DE, 0x018D, NV10, "NVidia GeForce4 448 Go"}, \
-	{0x10DE, 0x01A0, NV10, "NVidia GeForce2 Integrated GPU"}, \
-	{0x10DE, 0x01F0, NV10, "NVidia GeForce4 MX Integrated GPU"}, \
-	{0x10DE, 0x0200, NV20, "NVidia GeForce3"}, \
-	{0x10DE, 0x0201, NV20, "NVidia GeForce3 Ti 200"}, \
-	{0x10DE, 0x0202, NV20, "NVidia GeForce3 Ti 500"}, \
-	{0x10DE, 0x0203, NV20, "NVidia Quadro DCC"}, \
-	{0x10DE, 0x0250, NV20, "NVidia GeForce4 Ti 4600"}, \
-	{0x10DE, 0x0251, NV20, "NVidia GeForce4 Ti 4400"}, \
-	{0x10DE, 0x0252, NV20, "NVidia 0x0252"}, \
-	{0x10DE, 0x0253, NV20, "NVidia GeForce4 Ti 4200"}, \
-	{0x10DE, 0x0258, NV20, "NVidia Quadro4 900 XGL"}, \
-	{0x10DE, 0x0259, NV20, "NVidia Quadro4 750 XGL"}, \
-	{0x10DE, 0x025B, NV20, "NVidia Quadro4 700 XGL"}, \
-	{0x10DE, 0x0280, NV20, "NVidia GeForce4 Ti 4800"}, \
-	{0x10DE, 0x0281, NV20, "NVidia GeForce4 Ti 4200 with AGP8X"}, \
-	{0x10DE, 0x0282, NV20, "NVidia GeForce4 Ti 4800 SE"}, \
-	{0x10DE, 0x0286, NV20, "NVidia GeForce4 4200 Go"}, \
-	{0x10DE, 0x028C, NV20, "NVidia Quadro4 700 GoGL"}, \
-	{0x10DE, 0x0288, NV20, "NVidia Quadro4 980 XGL"}, \
-	{0x10DE, 0x0289, NV20, "NVidia Quadro4 780 XGL"}, \
-	{0x10DE, 0x0301, NV30, "NVidia GeForce FX 5800 Ultra"}, \
-	{0x10DE, 0x0302, NV30, "NVidia GeForce FX 5800"}, \
-	{0x10DE, 0x0308, NV30, "NVidia Quadro FX 2000"}, \
-	{0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \
-	{0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \
-	{0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \
-	{0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \
-	{0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \
-	{0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \
-	{0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \
-	{0x10DE, 0x031A, NV30, "NVidia GeForce FX Go5600"}, \
-	{0x10DE, 0x031B, NV30, "NVidia GeForce FX Go5650"}, \
-	{0x10DE, 0x031C, NV30, "NVidia Quadro FX Go700"}, \
-	{0x10DE, 0x031D, NV30, "NVidia 0x031D"}, \
-	{0x10DE, 0x031E, NV30, "NVidia 0x031E"}, \
-	{0x10DE, 0x031F, NV30, "NVidia 0x031F"}, \
-	{0x10DE, 0x0320, NV30, "NVidia GeForce FX 5200"}, \
-	{0x10DE, 0x0321, NV30, "NVidia GeForce FX 5200 Ultra"}, \
-	{0x10DE, 0x0322, NV30, "NVidia GeForce FX 5200"}, \
-	{0x10DE, 0x0323, NV30, "NVidia GeForce FX 5200SE"}, \
-	{0x10DE, 0x0324, NV30, "NVidia GeForce FX Go5200"}, \
-	{0x10DE, 0x0325, NV30, "NVidia GeForce FX Go5250"}, \
-	{0x10DE, 0x0326, NV30, "NVidia GeForce FX 5500"}, \
-	{0x10DE, 0x0327, NV30, "NVidia GeForce FX 5100"}, \
-	{0x10DE, 0x0328, NV30, "NVidia GeForce FX Go5200 32M/64M"}, \
-	{0x10DE, 0x0329, NV30, "NVidia GeForce FX 5200 (Mac)"}, \
-	{0x10DE, 0x032A, NV30, "NVidia Quadro NVS 280 PCI"}, \
-	{0x10DE, 0x032B, NV30, "NVidia Quadro FX 500/600 PCI"}, \
-	{0x10DE, 0x032C, NV30, "NVidia GeForce FX Go53xx Series"}, \
-	{0x10DE, 0x032D, NV30, "NVidia GeForce FX Go5100"}, \
-	{0x10DE, 0x032F, NV30, "NVidia 0x032F"}, \
-	{0x10DE, 0x0330, NV30, "NVidia GeForce FX 5900 Ultra"}, \
-	{0x10DE, 0x0331, NV30, "NVidia GeForce FX 5900"}, \
-	{0x10DE, 0x0332, NV30, "NVidia GeForce FX 5900XT"}, \
-	{0x10DE, 0x0333, NV30, "NVidia GeForce FX 5950 Ultra"}, \
-	{0x10DE, 0x033F, NV30, "NVidia Quadro FX 700"}, \
-	{0x10DE, 0x0334, NV30, "NVidia GeForce FX 5900ZT"}, \
-	{0x10DE, 0x0338, NV30, "NVidia Quadro FX 3000"}, \
-	{0x10DE, 0x0341, NV30, "NVidia GeForce FX 5700 Ultra"}, \
-	{0x10DE, 0x0342, NV30, "NVidia GeForce FX 5700"}, \
-	{0x10DE, 0x0343, NV30, "NVidia GeForce FX 5700LE"}, \
-	{0x10DE, 0x0344, NV30, "NVidia GeForce FX 5700VE"}, \
-	{0x10DE, 0x0345, NV30, "NVidia 0x0345"}, \
-	{0x10DE, 0x0347, NV30, "NVidia GeForce FX Go5700"}, \
-	{0x10DE, 0x0348, NV30, "NVidia GeForce FX Go5700"}, \
-	{0x10DE, 0x0349, NV30, "NVidia 0x0349"}, \
-	{0x10DE, 0x034B, NV30, "NVidia 0x034B"}, \
-	{0x10DE, 0x034C, NV30, "NVidia Quadro FX Go1000"}, \
-	{0x10DE, 0x034E, NV30, "NVidia Quadro FX 1100"}, \
-	{0x10DE, 0x034F, NV30, "NVidia 0x034F"}, \
-	{0x10DE, 0x0040, NV40, "NVidia GeForce 6800 Ultra"}, \
-	{0x10DE, 0x0041, NV40, "NVidia GeForce 6800"}, \
-	{0x10DE, 0x0042, NV40, "NVidia GeForce 6800 LE"}, \
-	{0x10DE, 0x0043, NV40, "NVidia 0x0043"}, \
-	{0x10DE, 0x0045, NV40, "NVidia GeForce 6800 GT"}, \
-	{0x10DE, 0x0046, NV40, "NVidia GeForce 6800 GT"}, \
-	{0x10DE, 0x0049, NV40, "NVidia 0x0049"}, \
-	{0x10DE, 0x004E, NV40, "NVidia Quadro FX 4000"}, \
-	{0x10DE, 0x00C0, NV40, "NVidia 0x00C0"}, \
-	{0x10DE, 0x00C1, NV40, "NVidia GeForce 6800"}, \
-	{0x10DE, 0x00C2, NV40, "NVidia GeForce 6800 LE"}, \
-	{0x10DE, 0x00C8, NV40, "NVidia GeForce Go 6800"}, \
-	{0x10DE, 0x00C9, NV40, "NVidia GeForce Go 6800 Ultra"}, \
-	{0x10DE, 0x00CC, NV40, "NVidia Quadro FX Go1400"}, \
-	{0x10DE, 0x00CD, NV40, "NVidia Quadro FX 3450/4000 SDI"}, \
-	{0x10DE, 0x00CE, NV40, "NVidia Quadro FX 1400"}, \
-	{0x10de, 0x00f0, NV40, "Nvidia GeForce 6600 GT"}, \
-	{0x10de, 0x00f1, NV40, "Nvidia GeForce 6600 GT"}, \
-	{0x10DE, 0x0140, NV40, "NVidia GeForce 6600 GT"}, \
-	{0x10DE, 0x0141, NV40, "NVidia GeForce 6600"}, \
-	{0x10DE, 0x0142, NV40, "NVidia GeForce 6600 LE"}, \
-	{0x10DE, 0x0143, NV40, "NVidia 0x0143"}, \
-	{0x10DE, 0x0144, NV40, "NVidia GeForce Go 6600"}, \
-	{0x10DE, 0x0145, NV40, "NVidia GeForce 6610 XL"}, \
-	{0x10DE, 0x0146, NV40, "NVidia GeForce Go 6600 TE/6200 TE"}, \
-	{0x10DE, 0x0147, NV40, "NVidia GeForce 6700 XL"}, \
-	{0x10DE, 0x0148, NV40, "NVidia GeForce Go 6600"}, \
-	{0x10DE, 0x0149, NV40, "NVidia GeForce Go 6600 GT"}, \
-	{0x10DE, 0x014B, NV40, "NVidia 0x014B"}, \
-	{0x10DE, 0x014C, NV40, "NVidia 0x014C"}, \
-	{0x10DE, 0x014D, NV40, "NVidia 0x014D"}, \
-	{0x10DE, 0x014E, NV40, "NVidia Quadro FX 540"}, \
-	{0x10DE, 0x014F, NV40, "NVidia GeForce 6200"}, \
-	{0x10DE, 0x0160, NV40, "NVidia 0x0160"}, \
-	{0x10DE, 0x0161, NV40, "NVidia GeForce 6200 TurboCache(TM)"}, \
-	{0x10DE, 0x0162, NV40, "NVidia GeForce 6200SE TurboCache(TM)"}, \
-	{0x10DE, 0x0163, NV40, "NVidia 0x0163"}, \
-	{0x10DE, 0x0164, NV40, "NVidia GeForce Go 6200"}, \
-	{0x10DE, 0x0165, NV40, "NVidia Quadro NVS 285"}, \
-	{0x10DE, 0x0166, NV40, "NVidia GeForce Go 6400"}, \
-	{0x10DE, 0x0167, NV40, "NVidia GeForce Go 6200"}, \
-	{0x10DE, 0x0168, NV40, "NVidia GeForce Go 6400"}, \
-	{0x10DE, 0x0169, NV40, "NVidia 0x0169"}, \
-	{0x10DE, 0x016B, NV40, "NVidia 0x016B"}, \
-	{0x10DE, 0x016C, NV40, "NVidia 0x016C"}, \
-	{0x10DE, 0x016D, NV40, "NVidia 0x016D"}, \
-	{0x10DE, 0x016E, NV40, "NVidia 0x016E"}, \
-	{0x10DE, 0x0210, NV40, "NVidia 0x0210"}, \
-	{0x10DE, 0x0211, NV40, "NVidia GeForce 6800"}, \
-	{0x10DE, 0x0212, NV40, "NVidia GeForce 6800 LE"}, \
-	{0x10DE, 0x0215, NV40, "NVidia GeForce 6800 GT"}, \
-	{0x10DE, 0x0220, NV40, "NVidia 0x0220"}, \
-	{0x10DE, 0x0221, NV40, "NVidia GeForce 6200"}, \
-	{0x10DE, 0x0222, NV40, "NVidia 0x0222"}, \
-	{0x10DE, 0x0228, NV40, "NVidia 0x0228"}, \
-	{0x10DE, 0x0090, NV40, "NVidia 0x0090"}, \
-	{0x10DE, 0x0091, NV40, "NVidia GeForce 7800 GTX"}, \
-	{0x10DE, 0x0092, NV40, "NVidia 0x0092"}, \
-	{0x10DE, 0x0093, NV40, "NVidia 0x0093"}, \
-	{0x10DE, 0x0094, NV40, "NVidia 0x0094"}, \
-	{0x10DE, 0x0098, NV40, "NVidia 0x0098"}, \
-	{0x10DE, 0x0099, NV40, "NVidia GeForce Go 7800 GTX"}, \
-	{0x10DE, 0x009C, NV40, "NVidia 0x009C"}, \
-	{0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \
-	{0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \
-	{0, 0, 0, NULL}
-
 #define xgi_PCI_IDS \
-	{0x18ca, 0x2200, 0, "XP5"}, \
-	{0x18ca, 0x0047, 0, "XP10 / XG47"}, \
+	{0x18CA, 0x0047, 0, "XP10 / XG47"}, \
+	{0x18CA, 0x2200, 0, "XP5"}, \
 	{0, 0, 0, NULL}
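
The *_PCI_IDS macros above expand to brace-initializer rows terminated by a
{0, 0, 0, NULL} sentinel, so a driver can paste one into a static table and
walk it at probe time.  A minimal sketch of that consumption; the struct
layout here is illustrative (the real definition lives in drmP.h and may
name its fields differently):

#include <sys/param.h>
#include <dev/drm2/drm_pciids.h>

struct drm_pciid_row {				/* hypothetical layout */
	int vendor;
	int device;
	long flags;				/* e.g. VIA_PRO_GROUP_A */
	const char *name;
};

static const struct drm_pciid_row xgi_pciids[] = {
	xgi_PCI_IDS				/* expands to the rows above */
};

/* Walk a table until the all-zero sentinel; return the chip name on match. */
static const char *
pciid_match(const struct drm_pciid_row *ids, int vendor, int device)
{
	for (; ids->vendor != 0; ids++)
		if (ids->vendor == vendor && ids->device == device)
			return (ids->name);
	return (NULL);
}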

Modified: trunk/sys/dev/drm2/drm_sarea.h
===================================================================
--- trunk/sys/dev/drm2/drm_sarea.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_sarea.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_sarea.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_sarea.h 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #ifndef _DRM_SAREA_H_
 #define _DRM_SAREA_H_
@@ -40,12 +40,14 @@
 
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
-#define SAREA_MAX                       0x2000
+#define SAREA_MAX                       0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX                       0x4000U
 #elif defined(__ia64__)
 #define SAREA_MAX                       0x10000	/* 64kB */
 #else
 /* Intel 830M driver needs at least 8k SAREA */
-#define SAREA_MAX                       0x2000UL
+#define SAREA_MAX                       0x2000U
 #endif
 
 /** Maximum number of drawables in the SAREA */
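
Note the constant suffixes: the alpha and default cases become 0x2000U and
the new mips case 0x4000U, replacing the old mix of a bare 0x2000 and
0x2000UL.  A plausible motivation (not stated in the log) is that SAREA_MAX
is compared against unsigned sizes, and a uniform unsigned int literal keeps
those comparisons unsigned-to-unsigned at the same width everywhere.  A
hedged sketch of the kind of check the constant feeds, not code from this
patch:

#include <sys/param.h>			/* PAGE_SIZE */

#define SAREA_MAX	0x2000U		/* non-ia64/mips value, as above */

/* Hypothetical validation helper: with the U suffix the comparison below
 * stays unsigned-to-unsigned on every architecture. */
static int
sarea_request_ok(size_t requested)
{
	/* the SAREA must be at least a page and at most SAREA_MAX */
	return (requested >= PAGE_SIZE && requested <= SAREA_MAX);
}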

Modified: trunk/sys/dev/drm2/drm_scatter.c
===================================================================
--- trunk/sys/dev/drm2/drm_scatter.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_scatter.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -24,7 +24,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_scatter.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_scatter.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 /** @file drm_scatter.c
  * Allocation of memory for scatter-gather mappings by the graphics chip.
@@ -34,97 +34,104 @@
 
 #include <dev/drm2/drmP.h>
 
-int
-drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
+#define DEBUG_SCATTER 0
+
+static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
 {
+	return kmem_alloc_attr(kernel_arena, size, M_NOWAIT | M_ZERO,
+	    0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+}
+
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+	if (entry == NULL)
+		return;
+
+	if (entry->vaddr != 0)
+		kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
+
+	free(entry->busaddr, DRM_MEM_SGLISTS);
+	free(entry, DRM_MEM_DRIVER);
+}
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
 	struct drm_sg_mem *entry;
 	vm_size_t size;
 	vm_pindex_t pindex;
 
+	DRM_DEBUG("\n");
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
 	if (dev->sg)
-		return EINVAL;
+		return -EINVAL;
 
+	entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!entry)
+		return -ENOMEM;
+
 	DRM_DEBUG("request size=%ld\n", request->size);
 
-	entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-
 	size = round_page(request->size);
 	entry->pages = OFF_TO_IDX(size);
 	entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
-	    DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
+	    DRM_MEM_SGLISTS, M_NOWAIT | M_ZERO);
+	if (!entry->busaddr) {
+		free(entry, DRM_MEM_DRIVER);
+		return -ENOMEM;
+	}
 
-	entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
-	    0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+	entry->vaddr = drm_vmalloc_dma(size);
 	if (entry->vaddr == 0) {
-		drm_sg_cleanup(entry);
-		return (ENOMEM);
+		free(entry->busaddr, DRM_MEM_DRIVER);
+		free(entry, DRM_MEM_DRIVER);
+		return -ENOMEM;
 	}
 
-	for(pindex = 0; pindex < entry->pages; pindex++) {
+	for (pindex = 0; pindex < entry->pages; pindex++) {
 		entry->busaddr[pindex] =
 		    vtophys(entry->vaddr + IDX_TO_OFF(pindex));
 	}
 
-	DRM_LOCK(dev);
-	if (dev->sg) {
-		DRM_UNLOCK(dev);
-		drm_sg_cleanup(entry);
-		return (EINVAL);
-	}
+	request->handle = entry->vaddr;
+
 	dev->sg = entry;
-	DRM_UNLOCK(dev);
 
-	request->handle = entry->vaddr;
-
 	DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
 	    entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
 
-	return (0);
+	return 0;
 }
 
-int
-drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_scatter_gather *request = data;
 
-	DRM_DEBUG("\n");
+	return drm_sg_alloc(dev, request);
-
-	return (drm_sg_alloc(dev, request));
 }
 
-void
-drm_sg_cleanup(struct drm_sg_mem *entry)
+int drm_sg_free(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
 {
-	if (entry == NULL)
-		return;
-
-	if (entry->vaddr != 0)
-		kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
-
-	free(entry->busaddr, DRM_MEM_SGLISTS);
-	free(entry, DRM_MEM_DRIVER);
-
-	return;
-}
-
-int
-drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
 	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
 
-	DRM_LOCK(dev);
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
 	entry = dev->sg;
 	dev->sg = NULL;
-	DRM_UNLOCK(dev);
 
 	if (!entry || entry->vaddr != request->handle)
-		return (EINVAL);
+		return -EINVAL;
 
 	DRM_DEBUG("free 0x%zx\n", entry->vaddr);
 
 	drm_sg_cleanup(entry);
 
-	return (0);
+	return 0;
 }
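
The rewritten drm_sg_alloc() allocates with M_NOWAIT, returns Linux-style
negative errno values, and hands user space the kernel virtual address of
the mapping as the handle (request->handle = entry->vaddr), which is what
drm_sg_free() later matches against.  From user space the pair is reached
through DRM_IOCTL_SG_ALLOC and DRM_IOCTL_SG_FREE; a minimal sketch, assuming
a libdrm-style <drm.h> and an illustrative device node, with privilege
checks and error handling elided:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <drm.h>

int
main(void)
{
	struct drm_scatter_gather sg = { .size = 4 * 4096 };
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* node name is illustrative */
	if (fd < 0)
		return (1);
	if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg) == 0) {
		printf("sg handle 0x%lx\n", (unsigned long)sg.handle);
		ioctl(fd, DRM_IOCTL_SG_FREE, &sg);	/* handle selects entry */
	}
	close(fd);
	return (0);
}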

Modified: trunk/sys/dev/drm2/drm_stub.c
===================================================================
--- trunk/sys/dev/drm2/drm_stub.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_stub.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -33,29 +33,470 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_stub.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_stub.c 295623 2016-02-15 07:35:40Z dumbbell $");
 
-#include "drmP.h"
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_core.h>
 
-int
-drm_setmaster_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+#ifdef DRM_DEBUG_DEFAULT_ON
+unsigned int drm_debug = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
+    DRM_DEBUGBITS_FAILED_IOCTL);
+#else
+unsigned int drm_debug = 0;	/* 1 to enable debug output */
+#endif
+EXPORT_SYMBOL(drm_debug);
+
+unsigned int drm_notyet = 0;
+
+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+
+static struct cdevsw drm_cdevsw = {
+	.d_version =	D_VERSION,
+	.d_open =	drm_open,
+	.d_read =	drm_read,
+	.d_ioctl =	drm_ioctl,
+	.d_poll =	drm_poll,
+	.d_mmap_single = drm_mmap_single,
+	.d_name =	"drm",
+	.d_flags =	D_TRACKCLOSE
+};
+
+static int drm_minor_get_id(struct drm_device *dev, int type)
 {
+	int new_id;
 
-	DRM_DEBUG("setmaster\n");
+	new_id = device_get_unit(dev->dev);
 
-	if (file_priv->master != 0)
-		return (0);
-	return (-EPERM);
+	if (new_id >= 64)
+		return -EINVAL;
+
+	if (type == DRM_MINOR_CONTROL) {
+		new_id += 64;
+	} else if (type == DRM_MINOR_RENDER) {
+		new_id += 128;
+	}
+
+	return new_id;
 }
 
-int
-drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-    struct drm_file *file_priv)
+struct drm_master *drm_master_create(struct drm_minor *minor)
 {
+	struct drm_master *master;
 
-	DRM_DEBUG("dropmaster\n");
-	if (file_priv->master != 0)
-		return (-EINVAL);
-	return (0);
+	master = malloc(sizeof(*master), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+	if (!master)
+		return NULL;
+
+	refcount_init(&master->refcount, 1);
+	mtx_init(&master->lock.spinlock, "drm_master__lock__spinlock",
+	    NULL, MTX_DEF);
+	DRM_INIT_WAITQUEUE(&master->lock.lock_queue);
+	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+	INIT_LIST_HEAD(&master->magicfree);
+	master->minor = minor;
+
+	list_add_tail(&master->head, &minor->master_list);
+
+	return master;
 }
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+	refcount_acquire(&master->refcount);
+	return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct drm_master *master)
+{
+	struct drm_magic_entry *pt, *next;
+	struct drm_device *dev = master->minor->dev;
+	struct drm_map_list *r_list, *list_temp;
+
+	list_del(&master->head);
+
+	if (dev->driver->master_destroy)
+		dev->driver->master_destroy(dev, master);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+
+	if (master->unique) {
+		free(master->unique, DRM_MEM_DRIVER);
+		master->unique = NULL;
+		master->unique_len = 0;
+	}
+
+	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+		list_del(&pt->head);
+		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+		free(pt, DRM_MEM_MAGIC);
+	}
+
+	drm_ht_remove(&master->magiclist);
+
+	free(master, DRM_MEM_KMS);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+	if (refcount_release(&(*master)->refcount))
+		drm_master_destroy(*master);
+	*master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	int ret;
+
+	if (file_priv->is_master)
+		return 0;
+
+	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+		return -EINVAL;
+
+	if (!file_priv->master)
+		return -EINVAL;
+
+	if (file_priv->minor->master)
+		return -EINVAL;
+
+	DRM_LOCK(dev);
+	file_priv->minor->master = drm_master_get(file_priv->master);
+	file_priv->is_master = 1;
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, file_priv, false);
+		if (unlikely(ret != 0)) {
+			file_priv->is_master = 0;
+			drm_master_put(&file_priv->minor->master);
+		}
+	}
+	DRM_UNLOCK(dev);
+
+	return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	if (!file_priv->is_master)
+		return -EINVAL;
+
+	if (!file_priv->minor->master)
+		return -EINVAL;
+
+	DRM_LOCK(dev);
+	if (dev->driver->master_drop)
+		dev->driver->master_drop(dev, file_priv, false);
+	drm_master_put(&file_priv->minor->master);
+	file_priv->is_master = 0;
+	DRM_UNLOCK(dev);
+	return 0;
+}
+
+int drm_fill_in_dev(struct drm_device *dev,
+			   struct drm_driver *driver)
+{
+	int retcode, i;
+
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
+
+	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
+	mtx_init(&dev->count_lock, "drmcount", NULL, MTX_DEF);
+	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
+	sx_init(&dev->dev_struct_lock, "drmslk");
+	mtx_init(&dev->ctxlist_mutex, "drmctxlist", NULL, MTX_DEF);
+	mtx_init(&dev->pcir_lock, "drmpcir", NULL, MTX_DEF);
+
+	if (drm_ht_create(&dev->map_hash, 12)) {
+		return -ENOMEM;
+	}
+
+	/* the DRM has 6 basic counters */
+	dev->counters = 6;
+	dev->types[0] = _DRM_STAT_LOCK;
+	dev->types[1] = _DRM_STAT_OPENS;
+	dev->types[2] = _DRM_STAT_CLOSES;
+	dev->types[3] = _DRM_STAT_IOCTLS;
+	dev->types[4] = _DRM_STAT_LOCKS;
+	dev->types[5] = _DRM_STAT_UNLOCKS;
+
+	/*
+	 * FIXME Linux<->FreeBSD: this is done in drm_setup() on Linux.
+	 */
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	dev->driver = driver;
+
+	retcode = drm_pci_agp_init(dev);
+	if (retcode)
+		goto error_out_unreg;
+
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
+
+	if (driver->driver_features & DRIVER_GEM) {
+		retcode = drm_gem_init(dev);
+		if (retcode) {
+			DRM_ERROR("Cannot initialize graphics execution "
+				  "manager (GEM)\n");
+			goto error_out_unreg;
+		}
+	}
+
+	retcode = drm_sysctl_init(dev);
+	if (retcode != 0) {
+		DRM_ERROR("Failed to create hw.dri sysctl entry: %d\n",
+		    retcode);
+	}
+
+	return 0;
+
+      error_out_unreg:
+	drm_cancel_fill_in_dev(dev);
+	return retcode;
+}
+EXPORT_SYMBOL(drm_fill_in_dev);
+
+void drm_cancel_fill_in_dev(struct drm_device *dev)
+{
+	struct drm_driver *driver;
+
+	driver = dev->driver;
+
+	drm_sysctl_cleanup(dev);
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+	drm_ctxbitmap_cleanup(dev);
+
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->agp_mtrr >= 0) {
+		int retval;
+		retval = drm_mtrr_del(dev->agp->agp_mtrr,
+				  dev->agp->agp_info.ai_aperture_base,
+				  dev->agp->agp_info.ai_aperture_size,
+				  DRM_MTRR_WC);
+		DRM_DEBUG("mtrr_del=%d\n", retval);
+	}
+	free(dev->agp, DRM_MEM_AGPLISTS);
+	dev->agp = NULL;
+
+	drm_ht_remove(&dev->map_hash);
+
+	mtx_destroy(&dev->irq_lock);
+	mtx_destroy(&dev->count_lock);
+	mtx_destroy(&dev->event_lock);
+	sx_destroy(&dev->dev_struct_lock);
+	mtx_destroy(&dev->ctxlist_mutex);
+	mtx_destroy(&dev->pcir_lock);
+}
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param sec_minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search for an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards
+ */
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+	struct drm_minor *new_minor;
+	int ret;
+	int minor_id;
+	const char *minor_devname;
+
+	DRM_DEBUG("\n");
+
+	minor_id = drm_minor_get_id(dev, type);
+	if (minor_id < 0)
+		return minor_id;
+
+	new_minor = malloc(sizeof(struct drm_minor), DRM_MEM_MINOR,
+	    M_NOWAIT | M_ZERO);
+	if (!new_minor) {
+		ret = -ENOMEM;
+		goto err_idr;
+	}
+
+	new_minor->type = type;
+	new_minor->dev = dev;
+	new_minor->index = minor_id;
+	INIT_LIST_HEAD(&new_minor->master_list);
+
+	new_minor->buf_sigio = NULL;
+
+	switch (type) {
+	case DRM_MINOR_CONTROL:
+		minor_devname = "dri/controlD%d";
+		break;
+	case DRM_MINOR_RENDER:
+		minor_devname = "dri/renderD%d";
+		break;
+	default:
+		minor_devname = "dri/card%d";
+		break;
+	}
+
+	ret = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &new_minor->device,
+	    &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
+	    DRM_DEV_MODE, minor_devname, minor_id);
+	if (ret) {
+		DRM_ERROR("Failed to create cdev: %d\n", ret);
+		goto err_mem;
+	}
+	new_minor->device->si_drv1 = new_minor;
+	*minor = new_minor;
+
+	DRM_DEBUG("new minor assigned %d\n", minor_id);
+	return 0;
+
+
+err_mem:
+	free(new_minor, DRM_MEM_MINOR);
+err_idr:
+	*minor = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_minor);
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+	struct drm_minor *minor = *minor_p;
+
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	funsetown(&minor->buf_sigio);
+
+	destroy_dev(minor->device);
+
+	free(minor, DRM_MEM_MINOR);
+	*minor_p = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(drm_put_minor);
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
+ *
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+	struct drm_driver *driver;
+	struct drm_map_list *r_list, *list_temp;
+
+	DRM_DEBUG("\n");
+
+	if (!dev) {
+		DRM_ERROR("cleanup called no dev\n");
+		return;
+	}
+	driver = dev->driver;
+
+	drm_lastclose(dev);
+
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->agp_mtrr >= 0) {
+		int retval;
+		retval = drm_mtrr_del(dev->agp->agp_mtrr,
+				  dev->agp->agp_info.ai_aperture_base,
+				  dev->agp->agp_info.ai_aperture_size,
+				  DRM_MTRR_WC);
+		DRM_DEBUG("mtrr_del=%d\n", retval);
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_mode_group_free(&dev->primary->mode_group);
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	drm_sysctl_cleanup(dev);
+
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		free(dev->agp, DRM_MEM_AGPLISTS);
+		dev->agp = NULL;
+	}
+
+	drm_vblank_cleanup(dev);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+	drm_ht_remove(&dev->map_hash);
+
+	drm_ctxbitmap_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
+	drm_put_minor(&dev->primary);
+
+	mtx_destroy(&dev->irq_lock);
+	mtx_destroy(&dev->count_lock);
+	mtx_destroy(&dev->event_lock);
+	sx_destroy(&dev->dev_struct_lock);
+	mtx_destroy(&dev->ctxlist_mutex);
+	mtx_destroy(&dev->pcir_lock);
+
+#ifdef FREEBSD_NOTYET
+	list_del(&dev->driver_item);
+#endif /* FREEBSD_NOTYET */
+}
+EXPORT_SYMBOL(drm_put_dev);
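
Two conventions in the new stub code are worth noting.  Minor numbers
derive from the device unit: unit N becomes card N, control minors sit at
N + 64 and render minors at N + 128, with units capped below 64.  And
drm_master is reference-counted, so every drm_master_get() must be balanced
by a drm_master_put(), which NULLs the caller's pointer and destroys the
master on the last release.  A sketch of that get/put discipline; the
surrounding function is hypothetical, the two calls are from this file:

static void
example_borrow_master(struct drm_file *file_priv)
{
	struct drm_master *m;

	if (file_priv->master == NULL)
		return;

	m = drm_master_get(file_priv->master);	/* +1 on master->refcount */

	/* ... m->unique, m->magiclist, etc. are safe to use here ... */

	drm_master_put(&m);	/* -1 reference; m is NULL afterwards */
}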

Modified: trunk/sys/dev/drm2/drm_sysctl.c
===================================================================
--- trunk/sys/dev/drm2/drm_sysctl.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_sysctl.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -9,11 +9,11 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
@@ -23,7 +23,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_sysctl.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_sysctl.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 /** @file drm_sysctl.c
  * Implementation of various sysctls for controlling DRM behavior and reporting
@@ -71,8 +71,11 @@
 	/* Add the sysctl node for DRI if it doesn't already exist */
 	drioid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
 	    "dri", CTLFLAG_RW, NULL, "DRI Graphics");
-	if (!drioid)
-		return 1;
+	if (!drioid) {
+		free(dev->sysctl, DRM_MEM_DRIVER);
+		dev->sysctl = NULL;
+		return (-ENOMEM);
+	}
 
 	/* Find the next free slot under hw.dri */
 	i = 0;
@@ -80,9 +83,11 @@
 		if (i <= oid->oid_arg2)
 			i = oid->oid_arg2 + 1;
 	}
-	if (i > 9)
-		return (1);
-	
+	if (i > 9) {
+		drm_sysctl_cleanup(dev);
+		return (-ENOSPC);
+	}
+
 	dev->sysctl_node_idx = i;
 	/* Add the hw.dri.x for our device */
 	info->name[0] = '0' + i;
@@ -89,9 +94,11 @@
 	info->name[1] = 0;
 	top = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(drioid),
 	    OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
-	if (!top)
-		return 1;
-	
+	if (!top) {
+		drm_sysctl_cleanup(dev);
+		return (-ENOMEM);
+	}
+
 	for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
 		oid = SYSCTL_ADD_OID(&info->ctx,
 			SYSCTL_CHILDREN(top),
@@ -103,14 +110,16 @@
 			drm_sysctl_list[i].f,
 			"A",
 			NULL);
-		if (!oid)
-			return 1;
+		if (!oid) {
+			drm_sysctl_cleanup(dev);
+			return (-ENOMEM);
+		}
 	}
 	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "debug",
-	    CTLFLAG_RW, &drm_debug_flag, sizeof(drm_debug_flag),
+	    CTLFLAG_RW, &drm_debug, sizeof(drm_debug),
 	    "Enable debugging output");
 	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "notyet",
-	    CTLFLAG_RW, &drm_notyet_flag, sizeof(drm_debug_flag),
+	    CTLFLAG_RW, &drm_notyet, sizeof(drm_debug),
 	    "Enable notyet reminders");
 
 	if (dev->driver->sysctl_init != NULL)
@@ -132,6 +141,9 @@
 {
 	int error;
 
+	if (dev->sysctl == NULL)
+		return (0);
+
 	error = sysctl_ctx_free(&dev->sysctl->ctx);
 	free(dev->sysctl, DRM_MEM_DRIVER);
 	dev->sysctl = NULL;
@@ -138,7 +150,7 @@
 	if (dev->driver->sysctl_cleanup != NULL)
 		dev->driver->sysctl_cleanup(dev);
 
-	return (error);
+	return (-error);
 }
 
 #define DRM_SYSCTL_PRINT(fmt, arg...)				\
@@ -152,19 +164,24 @@
 static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
 {
 	struct drm_device *dev = arg1;
+	struct drm_minor *minor;
+	struct drm_master *master;
 	char buf[128];
 	int retcode;
 	int hasunique = 0;
 
-	DRM_SYSCTL_PRINT("%s 0x%x", dev->driver->name, dev2udev(dev->devnode));
+	/* FIXME: This still uses primary minor. */
+	minor = dev->primary;
+	DRM_SYSCTL_PRINT("%s 0x%x", dev->driver->name, dev2udev(minor->device));
 	
 	DRM_LOCK(dev);
-	if (dev->unique) {
-		snprintf(buf, sizeof(buf), " %s", dev->unique);
+	master = minor->master;
+	if (master != NULL && master->unique) {
+		snprintf(buf, sizeof(buf), " %s", master->unique);
 		hasunique = 1;
 	}
 	DRM_UNLOCK(dev);
-	
+
 	if (hasunique)
 		SYSCTL_OUT(req, buf, strlen(buf));
 
@@ -177,8 +194,17 @@
 static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
 {
 	struct drm_device *dev = arg1;
-	drm_local_map_t *map, *tempmaps;
-	const char   *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+	struct drm_map_list *entry;
+	struct drm_local_map *map, *tempmaps;
+	const char *types[] = {
+		[_DRM_FRAME_BUFFER] = "FB",
+		[_DRM_REGISTERS] = "REG",
+		[_DRM_SHM] = "SHM",
+		[_DRM_AGP] = "AGP",
+		[_DRM_SCATTER_GATHER] = "SG",
+		[_DRM_CONSISTENT] = "CONS",
+		[_DRM_GEM] = "GEM"
+	};
 	const char *type, *yesno;
 	int i, mapcount;
 	char buf[128];
@@ -190,10 +216,12 @@
 	DRM_LOCK(dev);
 
 	mapcount = 0;
-	TAILQ_FOREACH(map, &dev->maplist, link)
-		mapcount++;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map != NULL)
+			mapcount++;
+	}
 
-	tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, DRM_MEM_DRIVER,
+	tempmaps = malloc(sizeof(*tempmaps) * mapcount, DRM_MEM_DRIVER,
 	    M_NOWAIT);
 	if (tempmaps == NULL) {
 		DRM_UNLOCK(dev);
@@ -201,33 +229,43 @@
 	}
 
 	i = 0;
-	TAILQ_FOREACH(map, &dev->maplist, link)
-		tempmaps[i++] = *map;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map != NULL)
+			tempmaps[i++] = *entry->map;
+	}
 
 	DRM_UNLOCK(dev);
 
 	DRM_SYSCTL_PRINT("\nslot offset	        size       "
-	    "type flags address            handle mtrr\n");
+	    "type flags address            mtrr\n");
 
 	for (i = 0; i < mapcount; i++) {
 		map = &tempmaps[i];
 
-		if (map->type < 0 || map->type > 4)
+		switch(map->type) {
+		default:
 			type = "??";
-		else
+			break;
+		case _DRM_FRAME_BUFFER:
+		case _DRM_REGISTERS:
+		case _DRM_SHM:
+		case _DRM_AGP:
+		case _DRM_SCATTER_GATHER:
+		case _DRM_CONSISTENT:
+		case _DRM_GEM:
 			type = types[map->type];
+			break;
+		}
 
-		if (!map->mtrr)
+		if (map->mtrr < 0)
 			yesno = "no";
 		else
 			yesno = "yes";
 
 		DRM_SYSCTL_PRINT(
-		    "%4d 0x%016lx 0x%08lx %4.4s  0x%02x 0x%016lx %6d %s\n",
-		    i, map->offset, map->size, type, map->flags,
-		    (unsigned long)map->virtual,
-		    (unsigned int)((unsigned long)map->handle >>
-		    DRM_MAP_HANDLE_SHIFT), yesno);
+		    "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%016lx %s\n",
+		    i, (unsigned long long)map->offset, map->size, type,
+		    map->flags, (unsigned long)map->handle, yesno);
 	}
 	SYSCTL_OUT(req, "", 1);
 
@@ -239,8 +277,8 @@
 static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
 {
 	struct drm_device	 *dev = arg1;
-	drm_device_dma_t *dma = dev->dma;
-	drm_device_dma_t tempdma;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_device_dma tempdma;
 	int *templists;
 	int i;
 	char buf[128];
@@ -304,7 +342,7 @@
 	DRM_LOCK(dev);
 
 	privcount = 0;
-	TAILQ_FOREACH(priv, &dev->files, link)
+	list_for_each_entry(priv, &dev->filelist, lhead)
 		privcount++;
 
 	tempprivs = malloc(sizeof(struct drm_file) * privcount, DRM_MEM_DRIVER,
@@ -314,7 +352,7 @@
 		return ENOMEM;
 	}
 	i = 0;
-	TAILQ_FOREACH(priv, &dev->files, link)
+	list_for_each_entry(priv, &dev->filelist, lhead)
 		tempprivs[i++] = *priv;
 
 	DRM_UNLOCK(dev);
@@ -325,7 +363,7 @@
 		priv = &tempprivs[i];
 		DRM_SYSCTL_PRINT("%c %-12s %5d %5d %10u %10lu\n",
 			       priv->authenticated ? 'y' : 'n',
-			       devtoname(priv->dev->devnode),
+			       devtoname(priv->minor->device),
 			       priv->pid,
 			       priv->uid,
 			       priv->magic,
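
The per-device sysctl tree still lives under hw.dri.<unit>, and the debug
and notyet knobs now bind directly to the drm_debug and drm_notyet globals
from drm_stub.c.  A hedged user-space sketch of reading one of the nodes
created above; "hw.dri.0.vm" assumes unit 0 and the drm_vm_info handler, so
adjust to whatever nodes exist on the running system:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[8192];
	size_t len = sizeof(buf);

	if (sysctlbyname("hw.dri.0.vm", buf, &len, NULL, 0) == 0)
		fwrite(buf, 1, len, stdout);	/* map table, one line per map */
	return (0);
}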

Modified: trunk/sys/dev/drm2/drm_vm.c
===================================================================
--- trunk/sys/dev/drm2/drm_vm.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/drm_vm.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,6 +1,17 @@
 /* $MidnightBSD$ */
-/*-
- * Copyright 2003 Eric Anholt
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith at valinux.com>
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith at valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -9,21 +20,22 @@
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/drm_vm.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 /** @file drm_vm.c
  * Support code for mmaping of DRM maps.
@@ -32,6 +44,7 @@
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
 
+#ifdef FREEBSD_NOTYET
 int
 drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
     int prot, vm_memattr_t *memattr)
@@ -132,4 +145,4 @@
 	*paddr = phys;
 	return 0;
 }
-
+#endif /* FREEBSD_NOTYET */
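
With drm_mmap() compiled out under FREEBSD_NOTYET, mappings are expected to
arrive through the d_mmap_single entry registered in drm_cdevsw above
(drm_mmap_single), which returns a whole VM object instead of translating
one page per fault.  A sketch of that entry point's shape; the signature
follows FreeBSD's cdevsw(9), the body is illustrative:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

static int
example_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	/* A real implementation would look up the DRM map or GEM object
	 * backing *offset and return a vm_object covering it. */
	return (ENODEV);
}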

Modified: trunk/sys/dev/drm2/i915/i915_debug.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_debug.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_debug.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008 Intel Corporation
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_debug.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_debug.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -43,17 +44,14 @@
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
-	DEFERRED_FREE_LIST,
 };
 
-static const char *
-yesno(int v)
+static const char *yesno(int v)
 {
-	return (v ? "yes" : "no");
+	return v ? "yes" : "no";
 }
 
-static int
-i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	const struct intel_device_info *info = INTEL_INFO(dev);
 
@@ -81,11 +79,10 @@
 	B(has_llc);
 #undef B
 
-	return (0);
+	return 0;
 }
 
-static const char *
-get_pin_flag(struct drm_i915_gem_object *obj)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	if (obj->user_pin_count > 0)
 		return "P";
@@ -95,25 +92,23 @@
 		return " ";
 }
 
-static const char *
-get_tiling_flag(struct drm_i915_gem_object *obj)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
 	switch (obj->tiling_mode) {
 	default:
-	case I915_TILING_NONE: return (" ");
-	case I915_TILING_X: return ("X");
-	case I915_TILING_Y: return ("Y");
+	case I915_TILING_NONE: return " ";
+	case I915_TILING_X: return "X";
+	case I915_TILING_Y: return "Y";
 	}
 }
 
-static const char *
-cache_level_str(int type)
+static const char *cache_level_str(int type)
 {
 	switch (type) {
 	case I915_CACHE_NONE: return " uncached";
 	case I915_CACHE_LLC: return " snooped (LLC)";
 	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
-	default: return ("");
+	default: return "";
 	}
 }
 
@@ -135,6 +130,8 @@
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		sbuf_printf(m, " (name: %d)", obj->base.name);
+	if (obj->pin_display)
+		sbuf_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		sbuf_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
@@ -153,8 +150,7 @@
 		sbuf_printf(m, " (%s)", obj->ring->name);
 }
 
-static int
-i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	uintptr_t list = (uintptr_t)data;
 	struct list_head *head;
@@ -164,7 +160,7 @@
 	int count;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	switch (list) {
 	case ACTIVE_LIST:
@@ -175,21 +171,13 @@
 		sbuf_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case PINNED_LIST:
-		sbuf_printf(m, "Pinned:\n");
-		head = &dev_priv->mm.pinned_list;
-		break;
 	case FLUSHING_LIST:
 		sbuf_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
-	case DEFERRED_FREE_LIST:
-		sbuf_printf(m, "Deferred free:\n");
-		head = &dev_priv->mm.deferred_free_list;
-		break;
 	default:
 		DRM_UNLOCK(dev);
-		return (EINVAL);
+		return -EINVAL;
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
@@ -205,7 +193,7 @@
 
 	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
-	return (0);
+	return 0;
 }
 
 #define count_objects(list, member) do { \
@@ -219,8 +207,7 @@
 	} \
 } while (0)
 
-static int
-i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 count, mappable_count;
@@ -228,7 +215,7 @@
 	struct drm_i915_gem_object *obj;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 	sbuf_printf(m, "%u objects, %zu bytes\n",
 		   dev_priv->mm.object_count,
 		   dev_priv->mm.object_memory);
@@ -245,21 +232,11 @@
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.pinned_list, mm_list);
-	sbuf_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.inactive_list, mm_list);
 	sbuf_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-	sbuf_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
@@ -277,24 +254,28 @@
 
 	sbuf_printf(m, "%zu [%zu] gtt total\n",
 		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data)
+static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uintptr_t list = (uintptr_t)data;
 	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (list == PINNED_LIST && obj->pin_count == 0)
+			continue;
+
 		sbuf_printf(m, "   ");
 		describe_obj(m, obj);
 		sbuf_printf(m, "\n");
@@ -308,11 +289,10 @@
 	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	struct intel_crtc *crtc;
 	struct drm_i915_gem_object *obj;
@@ -320,8 +300,6 @@
 	char pipe;
 	char plane;
 
-	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
-		return (0);
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 		pipe = pipe_name(crtc->pipe);
 		plane = plane_name(crtc->plane);
@@ -359,11 +337,10 @@
 		mtx_unlock(&dev->event_lock);
 	}
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
@@ -370,7 +347,7 @@
 	int count;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	count = 0;
 	if (!list_empty(&dev_priv->rings[RCS].request_list)) {
@@ -414,44 +391,78 @@
 	return 0;
 }
 
-static void
-i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
+static void i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
 {
 	if (ring->get_seqno) {
 		sbuf_printf(m, "Current sequence (%s): %d\n",
 			   ring->name, ring->get_seqno(ring));
-		sbuf_printf(m, "Waiter sequence (%s):  %d\n",
-			   ring->name, ring->waiting_seqno);
-		sbuf_printf(m, "IRQ sequence (%s):     %d\n",
-			   ring->name, ring->irq_seqno);
 	}
 }
 
-static int
-i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		i915_ring_seqno_info(m, &dev_priv->rings[i]);
+
 	DRM_UNLOCK(dev);
-	return (0);
+
+	return 0;
 }
 
 
-static int
-i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, pipe;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
-	if (!HAS_PCH_SPLIT(dev)) {
+	if (IS_VALLEYVIEW(dev)) {
+		sbuf_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		sbuf_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		sbuf_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		sbuf_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(pipe)
+			sbuf_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		sbuf_printf(m, "Master IER:\t%08x\n",
+			   I915_READ(VLV_MASTER_IER));
+
+		sbuf_printf(m, "Render IER:\t%08x\n",
+			   I915_READ(GTIER));
+		sbuf_printf(m, "Render IIR:\t%08x\n",
+			   I915_READ(GTIIR));
+		sbuf_printf(m, "Render IMR:\t%08x\n",
+			   I915_READ(GTIMR));
+
+		sbuf_printf(m, "PM IER:\t\t%08x\n",
+			   I915_READ(GEN6_PMIER));
+		sbuf_printf(m, "PM IIR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIIR));
+		sbuf_printf(m, "PM IMR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIMR));
+
+		sbuf_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		sbuf_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		sbuf_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+	} else if (!HAS_PCH_SPLIT(dev)) {
 		sbuf_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		sbuf_printf(m, "Interrupt identity:  %08x\n",
@@ -494,17 +505,16 @@
 	}
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -520,21 +530,20 @@
 	}
 
 	DRM_UNLOCK(dev);
-	return (0);
+	return 0;
 }
 
-static int
-i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	const volatile u32 *hws;
+	const volatile u32 __iomem *hws;
 	int i;
 
 	ring = &dev_priv->rings[(uintptr_t)data];
 	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
-		return (0);
+		return 0;
 
 	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
 		sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -541,84 +550,27 @@
 			   i * 4,
 			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
 	}
-	return (0);
+	return 0;
 }
 
-static int
-i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)
+static const char *ring_str(int ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-
-	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
-	ring = &dev_priv->rings[(uintptr_t)data];
-	if (!ring->obj) {
-		sbuf_printf(m, "No ringbuffer setup\n");
-	} else {
-		u8 *virt = ring->virtual_start;
-		uint32_t off;
-
-		for (off = 0; off < ring->size; off += 4) {
-			uint32_t *ptr = (uint32_t *)(virt + off);
-			sbuf_printf(m, "%08x :  %08x\n", off, *ptr);
-		}
-	}
-	DRM_UNLOCK(dev);
-	return (0);
-}
-
-static int
-i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-
-	ring = &dev_priv->rings[(uintptr_t)data];
-	if (ring->size == 0)
-		return (0);
-
-	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
-
-	sbuf_printf(m, "Ring %s:\n", ring->name);
-	sbuf_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
-	sbuf_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
-	sbuf_printf(m, "  Size :    %08x\n", ring->size);
-	sbuf_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
-	sbuf_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
-		sbuf_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
-		sbuf_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
-	}
-	sbuf_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
-	sbuf_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
-
-	DRM_UNLOCK(dev);
-
-	return (0);
-}
-
-static const char *
-ring_str(int ring)
-{
 	switch (ring) {
-	case RCS: return (" render");
-	case VCS: return (" bsd");
-	case BCS: return (" blt");
-	default: return ("");
+	case RCS: return " render";
+	case VCS: return " bsd";
+	case BCS: return " blt";
+	default: return "";
 	}
 }
 
-static const char *
-pin_flag(int pinned)
+static const char *pin_flag(int pinned)
 {
 	if (pinned > 0)
-		return (" P");
+		return " P";
 	else if (pinned < 0)
-		return (" p");
+		return " p";
 	else
-		return ("");
+		return "";
 }
 
 static const char *tiling_flag(int tiling)
@@ -641,8 +593,10 @@
 	return purgeable ? " purgeable" : "";
 }
 
-static void print_error_buffers(struct sbuf *m, const char *name,
-    struct drm_i915_error_buffer *err, int count)
+static void print_error_buffers(struct sbuf *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
 {
 
 	sbuf_printf(m, "%s [%d]:\n", name, count);
@@ -672,11 +626,13 @@
 	}
 }
 
-static void
-i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
-    struct drm_i915_error_state *error, unsigned ring)
+static void i915_ring_error_state(struct sbuf *m,
+				  struct drm_device *dev,
+				  struct drm_i915_error_state *error,
+				  unsigned ring)
 {
 
+	MPASS((ring < I915_NUM_RINGS));	/* shut up confused gcc */
 	sbuf_printf(m, "%s command stream:\n", ring_str(ring));
 	sbuf_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
 	sbuf_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
@@ -691,8 +647,8 @@
 	if (INTEL_INFO(dev)->gen >= 4)
 		sbuf_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	sbuf_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+	sbuf_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 	if (INTEL_INFO(dev)->gen >= 6) {
-		sbuf_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 		sbuf_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
 		sbuf_printf(m, "  SYNC_0: 0x%08x\n",
 			   error->semaphore_mboxes[ring][0]);
@@ -700,6 +656,7 @@
 			   error->semaphore_mboxes[ring][1]);
 	}
 	sbuf_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+	sbuf_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 	sbuf_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	sbuf_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 }
@@ -709,12 +666,17 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
+	struct intel_ring_buffer *ring;
 	int i, j, page, offset, elt;
 
 	mtx_lock(&dev_priv->error_lock);
-	if (!dev_priv->first_error) {
+	error = dev_priv->first_error;
+	if (error != NULL)
+		refcount_acquire(&error->ref);
+	mtx_unlock(&dev_priv->error_lock);
+	if (!error) {
 		sbuf_printf(m, "no error state collected\n");
-		goto out;
+		return 0;
 	}
 
 	error = dev_priv->first_error;
@@ -723,6 +685,7 @@
 	    (intmax_t)error->time.tv_usec);
 	sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
+	sbuf_printf(m, "IER: 0x%08x\n", error->ier);
 	sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -734,11 +697,8 @@
 		sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	i915_ring_error_state(m, dev, error, RCS);
-	if (HAS_BLT(dev))
-		i915_ring_error_state(m, dev, error, BCS);
-	if (HAS_BSD(dev))
-		i915_ring_error_state(m, dev, error, VCS);
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_error_state(m, dev, error, i);
 
 	if (error->active_bo)
 		print_error_buffers(m, "Active",
@@ -750,9 +710,9 @@
 				    error->pinned_bo,
 				    error->pinned_bo_count);
 
-	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		struct drm_i915_error_object *obj;
- 
+
 		if ((obj = error->ring[i].batchbuffer)) {
 			sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
 				   dev_priv->rings[i].name,
@@ -801,9 +761,25 @@
 	if (error->display)
 		intel_display_print_error_state(m, dev, error->display);
 
-out:
+	if (refcount_release(&error->ref))
+		i915_error_state_free(error);
+
+	return 0;
+}
+
+static int
+i915_error_state_w(struct drm_device *dev, const char *str, void *unused)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+	mtx_lock(&dev_priv->error_lock);
+	error = dev_priv->first_error;
+	dev_priv->first_error = NULL;
 	mtx_unlock(&dev_priv->error_lock);
-
+	if (error != NULL && refcount_release(&error->ref))
+		i915_error_state_free(error);
 	return (0);
 }
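
The read and reset paths above share one rule: dev_priv->first_error
may only be dereferenced through a reference taken while error_lock is
held, and whichever side drops the last reference frees the state.
That lets the sysctl reader dump a large error state without holding
the mutex for the whole dump. A minimal sketch of the pattern using
FreeBSD's sys/refcount.h; the struct and function names below are
illustrative, not the driver's:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/refcount.h>

	struct err_state {
		volatile u_int	ref;	/* refcount_init(&ref, 1) at capture */
		/* ... captured register state ... */
	};

	static struct err_state *
	err_state_get(struct mtx *lock, struct err_state **slot)
	{
		struct err_state *e;

		mtx_lock(lock);
		e = *slot;
		if (e != NULL)
			refcount_acquire(&e->ref);	/* pin before unlocking */
		mtx_unlock(lock);
		return (e);				/* may be NULL */
	}

	static void
	err_state_put(struct err_state *e, void (*destroy)(struct err_state *))
	{
		if (e != NULL && refcount_release(&e->ref))
			destroy(e);		/* last reference frees it */
	}
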
 
@@ -814,8 +790,10 @@
 	u16 crstanddelay;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	crstanddelay = I915_READ16(CRSTANDVID);
+
 	DRM_UNLOCK(dev);
 
 	sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
@@ -824,8 +802,7 @@
 	return 0;
 }
 
-static int
-i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
@@ -850,7 +827,7 @@
 
 		/* RPSTAT1 is in the GT power well */
 		if (sx_xlock_sig(&dev->dev_struct_lock))
-			return (EINTR);
+			return -EINTR;
 		gen6_gt_force_wake_get(dev_priv);
 
 		rpstat = I915_READ(GEN6_RPSTAT1);
@@ -905,8 +882,7 @@
 	return 0;
 }
 
-static int
-i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 delayfreq;
@@ -913,24 +889,25 @@
 	int i;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
 		sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
+
 	DRM_UNLOCK(dev);
-	return (0);
+
+	return 0;
 }
 
-static inline int
-MAP_TO_MV(int map)
+static inline int MAP_TO_MV(int map)
 {
 	return 1250 - (map * 25);
 }
 
-static int
-i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 inttoext;
@@ -937,18 +914,19 @@
 	int i;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
+
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
+static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl;
@@ -956,10 +934,12 @@
 	u16 crstandvid;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	rgvmodectl = I915_READ(MEMMODECTL);
 	rstdbyctl = I915_READ(RSTDBYCTL);
 	crstandvid = I915_READ16(CRSTANDVID);
+
 	DRM_UNLOCK(dev);
 
 	sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1010,8 +990,7 @@
 	return 0;
 }
 
-static int
-gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
+static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rpmodectl1, gt_core_status, rcctl1;
@@ -1018,8 +997,9 @@
 	unsigned forcewake_count;
 	int count=0;
 
+
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	mtx_lock(&dev_priv->gt_lock);
 	forcewake_count = dev_priv->forcewake_count;
@@ -1031,7 +1011,7 @@
 	} else {
 		/* NB: we cannot use forcewake, else we read the wrong values */
 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
-			DRM_UDELAY(10);
+			udelay(10);
 		sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
 	}
 
@@ -1081,6 +1061,17 @@
 
 	sbuf_printf(m, "Core Power Down: %s\n",
 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+	/* Not exactly sure what this is */
+	sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+	sbuf_printf(m, "RC6 residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6));
+	sbuf_printf(m, "RC6+ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6p));
+	sbuf_printf(m, "RC6++ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6pp));
+
 	return 0;
 }
 
@@ -1088,12 +1079,12 @@
 {
 
 	if (IS_GEN6(dev) || IS_GEN7(dev))
-		return (gen6_drpc_info(dev, m));
+		return gen6_drpc_info(dev, m);
 	else
-		return (ironlake_drpc_info(dev, m));
+		return ironlake_drpc_info(dev, m);
 }
-static int
-i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
+
+static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
@@ -1135,8 +1126,7 @@
 	return 0;
 }
 
-static int
-i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
@@ -1153,9 +1143,33 @@
 	sbuf_printf(m, "self-refresh: %s",
 		   sr_enabled ? "enabled" : "disabled");
 
-	return (0);
+	return 0;
 }
 
+static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	if (!IS_GEN5(dev))
+		return -ENODEV;
+
+	if (sx_xlock_sig(&dev->dev_struct_lock))
+		return -EINTR;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+	DRM_UNLOCK(dev);
+
+	sbuf_printf(m, "GMCH temp: %ld\n", temp);
+	sbuf_printf(m, "Chipset power: %ld\n", chipset);
+	sbuf_printf(m, "GFX power: %ld\n", gfx);
+	sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
 static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
     void *unused)
 {
@@ -1164,11 +1178,11 @@
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
 		sbuf_printf(m, "unsupported on this chipset");
-		return (0);
+		return 0;
 	}
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
@@ -1189,59 +1203,35 @@
 
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned long temp, chipset, gfx;
 
-	if (!IS_GEN5(dev)) {
-		sbuf_printf(m, "Not supported\n");
-		return (0);
-	}
-
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
-	temp = i915_mch_val(dev_priv);
-	chipset = i915_chipset_val(dev_priv);
-	gfx = i915_gfx_val(dev_priv);
-	DRM_UNLOCK(dev);
+		return -EINTR;
 
-	sbuf_printf(m, "GMCH temp: %ld\n", temp);
-	sbuf_printf(m, "Chipset power: %ld\n", chipset);
-	sbuf_printf(m, "GFX power: %ld\n", gfx);
-	sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
+	sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
 
-	return (0);
-}
-
-static int
-i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
-	sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
 #if 0
-static int
-i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
+static int i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
+
 	if (opregion->header)
 		seq_write(m, opregion->header, OPREGION_SIZE);
+
 	DRM_UNLOCK(dev);
 
 	return 0;
@@ -1248,8 +1238,7 @@
 }
 #endif
 
-static int
-i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_fbdev *ifbdev;
@@ -1256,12 +1245,12 @@
 	struct intel_framebuffer *fb;
 
 	if (sx_xlock_sig(&dev->dev_struct_lock))
-		return (EINTR);
+		return -EINTR;
 
 	ifbdev = dev_priv->fbdev;
 	if (ifbdev == NULL) {
 		DRM_UNLOCK(dev);
-		return (0);
+		return 0;
 	}
 	fb = to_intel_framebuffer(ifbdev->helper.fb);
 
@@ -1288,22 +1277,18 @@
 
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	drm_i915_private_t *dev_priv;
 	int ret;
 
-	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
-		return (0);
-
 	dev_priv = dev->dev_private;
 	ret = sx_xlock_sig(&dev->mode_config.mutex);
 	if (ret != 0)
-		return (EINTR);
+		return -EINTR;
 
 	if (dev_priv->pwrctx != NULL) {
 		sbuf_printf(m, "power context ");
@@ -1319,11 +1304,10 @@
 
 	sx_xunlock(&dev->mode_config.mutex);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
+static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
     void *data)
 {
 	struct drm_i915_private *dev_priv;
@@ -1336,11 +1320,10 @@
 
 	sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
 
-	return (0);
+	return 0;
 }
 
-static const char *
-swizzle_string(unsigned swizzle)
+static const char *swizzle_string(unsigned swizzle)
 {
 
 	switch(swizzle) {
@@ -1365,8 +1348,7 @@
 	return "bug";
 }
 
-static int
-i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	struct drm_i915_private *dev_priv;
 	int ret;
@@ -1374,7 +1356,7 @@
 	dev_priv = dev->dev_private;
 	ret = sx_xlock_sig(&dev->dev_struct_lock);
 	if (ret != 0)
-		return (EINTR);
+		return -EINTR;
 
 	sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1401,14 +1383,13 @@
 			   I915_READ(ARB_MODE));
 		sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
- 	}
+	}
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
-static int
-i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
+static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_ring_buffer *ring;
@@ -1418,7 +1399,7 @@
 
 	ret = sx_xlock_sig(&dev->dev_struct_lock);
 	if (ret != 0)
-		return (EINTR);
+		return -EINTR;
 	if (INTEL_INFO(dev)->gen == 6)
 		sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
@@ -1441,9 +1422,55 @@
 	sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 	DRM_UNLOCK(dev);
 
-	return (0);
+	return 0;
 }
 
+static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (!IS_VALLEYVIEW(dev)) {
+		sbuf_printf(m, "unsupported\n");
+		return 0;
+	}
+
+	dev_priv = dev->dev_private;
+
+	ret = sx_xlock_sig(&dev->mode_config.mutex);
+	if (ret != 0)
+		return -EINTR;
+
+	sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+	sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
+	sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+	sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+	sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+	sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+	sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+	sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+	sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+	sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+	sx_xunlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 static int
 i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
 {
@@ -1520,57 +1547,77 @@
 	return (0);
 }
 
+static int
+i915_stop_rings(SYSCTL_HANDLER_ARGS)
+{
+	struct drm_device *dev;
+	drm_i915_private_t *dev_priv;
+	int error, val;
+
+	dev = arg1;
+	dev_priv = dev->dev_private;
+	if (dev_priv == NULL)
+		return (EBUSY);
+	DRM_LOCK(dev);
+	val = dev_priv->stop_rings;
+	DRM_UNLOCK(dev);
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return (error);
+	DRM_DEBUG("Stopping rings 0x%08x\n", val);
+
+	DRM_LOCK(dev);
+	dev_priv->stop_rings = val;
+	DRM_UNLOCK(dev);
+	return (0);
+}
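
i915_stop_rings follows the standard FreeBSD two-phase sysctl shape:
snapshot the value under the driver lock, let sysctl_handle_int() do
the userland I/O, and commit the new value only when the request
actually carried one (req->newptr != NULL). A generic sketch of that
shape, with placeholder softc and lock names (sys/sysctl.h provides
SYSCTL_HANDLER_ARGS):

	static int
	my_sysctl_int(SYSCTL_HANDLER_ARGS)
	{
		struct my_softc *sc = arg1;
		int error, val;

		MY_LOCK(sc);
		val = sc->my_value;		/* snapshot under the lock */
		MY_UNLOCK(sc);
		error = sysctl_handle_int(oidp, &val, 0, req);
		if (error != 0 || req->newptr == NULL)
			return (error);		/* read, or userland error */
		MY_LOCK(sc);
		sc->my_value = val;		/* commit on a set request */
		MY_UNLOCK(sc);
		return (0);
	}
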
+
 static struct i915_info_sysctl_list {
 	const char *name;
 	int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
+	int (*ptr_w)(struct drm_device *dev, const char *str, void *data);
 	int flags;
 	void *data;
 } i915_info_sysctl_list[] = {
-	{"i915_capabilities", i915_capabilities, 0},
-	{"i915_gem_objects", i915_gem_object_info, 0},
-	{"i915_gem_gtt", i915_gem_gtt_info, 0},
-	{"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0,
+	{"i915_capabilities", i915_capabilities, NULL, 0},
+	{"i915_gem_objects", i915_gem_object_info, NULL, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, NULL, 0},
+	{"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, NULL, 0,
+	    (void *)ACTIVE_LIST},
+	{"i915_gem_flushing", i915_gem_object_list_info, NULL, 0,
 	    (void *)FLUSHING_LIST},
-	{"i915_gem_inactive", i915_gem_object_list_info, 0,
+	{"i915_gem_inactive", i915_gem_object_list_info, NULL, 0,
 	    (void *)INACTIVE_LIST},
-	{"i915_gem_pinned", i915_gem_object_list_info, 0,
-	    (void *)PINNED_LIST},
-	{"i915_gem_deferred_free", i915_gem_object_list_info, 0,
-	    (void *)DEFERRED_FREE_LIST},
-	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
-	{"i915_gem_request", i915_gem_request_info, 0},
-	{"i915_gem_seqno", i915_gem_seqno_info, 0},
-	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
-	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
-	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
-	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
-	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
-	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
-	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
-	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
-	{"i915_error_state", i915_error_state, 0},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
-	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
-	{"i915_drpc_info", i915_drpc_info, 0},
-	{"i915_emon_status", i915_emon_status, 0},
-	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
-	{"i915_fbc_status", i915_fbc_status, 0},
-	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0},
+	{"i915_gem_request", i915_gem_request_info, NULL, 0},
+	{"i915_gem_seqno", i915_gem_seqno_info, NULL, 0},
+	{"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0},
+	{"i915_gem_interrupt", i915_interrupt_info, NULL, 0},
+	{"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS},
+	{"i915_error_state", i915_error_state, i915_error_state_w, 0},
+	{"i915_rstdby_delays", i915_rstdby_delays, NULL, 0},
+	{"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0},
+	{"i915_delayfreq_table", i915_delayfreq_table, NULL, 0},
+	{"i915_inttoext_table", i915_inttoext_table, NULL, 0},
+	{"i915_drpc_info", i915_drpc_info, NULL, 0},
+	{"i915_emon_status", i915_emon_status, NULL, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, NULL, 0},
+	{"i915_gfxec", i915_gfxec, NULL, 0},
+	{"i915_fbc_status", i915_fbc_status, NULL, 0},
+	{"i915_sr_status", i915_sr_status, NULL, 0},
 #if 0
-	{"i915_opregion", i915_opregion, 0},
+	{"i915_opregion", i915_opregion, NULL, 0},
 #endif
-	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
-	{"i915_context_status", i915_context_status, 0},
-	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
-	{"i915_swizzle_info", i915_swizzle_info, 0},
-	{"i915_ppgtt_info", i915_ppgtt_info, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, NULL, 0},
+	{"i915_context_status", i915_context_status, NULL, 0},
+	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info,
+	    NULL, 0},
+	{"i915_swizzle_info", i915_swizzle_info, NULL, 0},
+	{"i915_ppgtt_info", i915_ppgtt_info, NULL, 0},
+	{"i915_dpio", i915_dpio_info, NULL, 0},
 };
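
The table grows a third column, ptr_w, so a single handler can service
both directions: ptr formats a dump into an sbuf on read, ptr_w parses
a string on write. Entries with a NULL ptr_w are registered read-only;
i915_error_state is the only read-write node so far (a write of any
string resets the captured error state, per i915_error_state_w above).
A hypothetical new entry would look like:

	/* Hypothetical reader/writer pair; signatures as in the struct. */
	static int i915_example_r(struct drm_device *dev, struct sbuf *m, void *data);
	static int i915_example_w(struct drm_device *dev, const char *str, void *data);

	{"i915_example", i915_example_r, i915_example_w, 0},
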
 
 struct i915_info_sysctl_thunk {
@@ -1586,6 +1633,7 @@
 	struct i915_info_sysctl_thunk *thunk;
 	struct drm_device *dev;
 	drm_i915_private_t *dev_priv;
+	char *p;
 	int error;
 
 	thunk = arg1;
@@ -1597,11 +1645,24 @@
 	if (error != 0)
 		return (error);
 	sbuf_new_for_sysctl(&m, NULL, 128, req);
-	error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
+	error = -i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
 	    thunk->arg);
 	if (error == 0)
 		error = sbuf_finish(&m);
 	sbuf_delete(&m);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	if (req->newlen > 2048)
+		return (E2BIG);
+	p = malloc(req->newlen + 1, M_TEMP, M_WAITOK);
+	error = SYSCTL_IN(req, p, req->newlen);
+	if (error != 0)
+		goto out;
+	p[req->newlen] = '\0';
+	error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p,
+	    thunk->arg);
+out:
+	free(p, M_TEMP);
 	return (error);
 }
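
Note the sign flip in the thunk: the info callbacks now return
Linux-style negative errno values (-EINTR, -ENODEV), while the sysctl
layer expects positive errno, so the result is negated at the
boundary. The new write path bounds the input at 2048 bytes,
NUL-terminates it, and hands the C string to ptr_w. Abstractly:

	/* Callbacks speak Linux errno; the sysctl glue flips the sign. */
	error = -callback(dev, &m, arg);	/* -EINTR arrives as EINTR */
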
 
@@ -1618,9 +1679,9 @@
 	struct i915_info_sysctl_thunk *thunks;
 	int i, error;
 
-	thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
+	thunks = malloc(sizeof(*thunks) * ARRAY_SIZE(i915_info_sysctl_list),
 	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
+	for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
 		thunks[i].dev = dev;
 		thunks[i].idx = i;
 		thunks[i].arg = i915_info_sysctl_list[i].data;
@@ -1629,13 +1690,15 @@
 	info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
 	    CTLFLAG_RW, NULL, NULL);
 	if (info == NULL)
-		return (ENOMEM);
-	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
+		return (-ENOMEM);
+	for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
 		oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
-		    i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
+		    i915_info_sysctl_list[i].name, CTLTYPE_STRING |
+		    (i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW :
+		    CTLFLAG_RD),
 		    &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
 		if (oid == NULL)
-			return (ENOMEM);
+			return (-ENOMEM);
 	}
 	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
 	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
@@ -1644,29 +1707,34 @@
 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
 	    i915_debug_set_wedged, "I", NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
 	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
 	    "I", NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
 	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
 	    "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
 	    0, i915_cache_sharing, "I", NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
+	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
+	    "stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
+	    0, i915_stop_rings, "I", NULL);
+	if (oid == NULL)
+		return (-ENOMEM);
 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
 	    CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
 	    CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
 	    CTLFLAG_RW, &i915_intr_pf, 0, NULL);
 	if (oid == NULL)
-		return (ENOMEM);
+		return (-ENOMEM);
 
 	error = drm_add_busid_modesetting(dev, ctx, top);
 	if (error != 0)

Modified: trunk/sys/dev/drm2/i915/i915_dma.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_dma.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_dma.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
  */
 /*-
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_dma.c 239965 2012-09-01 05:35:48Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_dma.c 295623 2016-02-15 07:35:40Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -36,22 +37,48 @@
 #include <dev/drm2/i915/intel_drv.h>
 #include <dev/drm2/i915/intel_ringbuffer.h>
 
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- *   - i915_mch_dev
- *   - dev_priv->max_delay
- *   - dev_priv->min_delay
- *   - dev_priv->fmax
- *   - dev_priv->gpu_busy
- */
-static struct mtx mchdev_lock;
-MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
 
-static void i915_pineview_get_mem_freq(struct drm_device *dev);
-static void i915_ironlake_get_mem_freq(struct drm_device *dev);
-static int i915_driver_unload_int(struct drm_device *dev, bool locked);
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
 
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
+
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
+	if (LP_RING(dev->dev_private)->obj == NULL)			\
+		LOCK_TEST_WITH_RETURN(dev, file);			\
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+	if (I915_NEED_GFX_HWS(dev_priv->dev))
+		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
+	else
+		return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX		0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
+
+	if (dev->primary->master) {
+		master_priv = dev->primary->master->driver_priv;
+		if (master_priv->sarea_priv)
+			master_priv->sarea_priv->last_dispatch =
+				READ_BREADCRUMB(dev_priv);
+	}
+}
+
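
The LP_RING macros route the legacy DRI1 ioctls through ring 0 (RCS),
and the "breadcrumb" is the completion counter those ioctls poll: the
GPU stores a sequence number into dword 0x21 (I915_BREADCRUMB_INDEX)
of the hardware status page via MI_STORE_DWORD_INDEX, and
READ_BREADCRUMB reads it back through the pmap mapping on chipsets
that keep the status page in graphics memory, or the ring's status
page otherwise. The emit side, as used by i915_emit_breadcrumb() and
i915_emit_irq() later in this file, looks like:

	/* Emit a breadcrumb: store the counter to the HWS, then interrupt. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);	/* GPU writes this dword */
		OUT_RING(MI_USER_INTERRUPT);	/* then raises an interrupt */
		ADVANCE_LP_RING();
	}
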
 static void i915_write_hws_pga(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -78,10 +105,8 @@
 	 * of allocation is used on <= 965 hardware, that has several
 	 * erratas regarding the use of physical memory > 4 GB.
 	 */
-	DRM_UNLOCK(dev);
 	dev_priv->status_page_dmah =
-		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-	DRM_LOCK(dev);
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
 	if (!dev_priv->status_page_dmah) {
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
@@ -115,7 +140,8 @@
 	if (dev_priv->status_gfx_addr) {
 		dev_priv->status_gfx_addr = 0;
 		ring->status_page.gfx_addr = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
+		    PAGE_SIZE);
 	}
 
 	/* Need to rewrite hardware status page */
@@ -125,6 +151,7 @@
 void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
@@ -140,15 +167,12 @@
 	if (ring->space < 0)
 		ring->space += ring->size;
 
-#if 1
-	KIB_NOTYET();
-#else
 	if (!dev->primary->master)
 		return;
-#endif
 
-	if (ring->head == ring->tail && dev_priv->sarea_priv)
-		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+	master_priv = dev->primary->master->driver_priv;
+	if (ring->head == ring->tail && master_priv->sarea_priv)
+		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
 static int i915_dma_cleanup(struct drm_device * dev)
@@ -164,8 +188,10 @@
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
+	DRM_LOCK(dev);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+	DRM_UNLOCK(dev);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
@@ -177,18 +203,17 @@
 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret;
 
-	dev_priv->sarea = drm_getsarea(dev);
-	if (!dev_priv->sarea) {
-		DRM_ERROR("can not find sarea!\n");
-		i915_dma_cleanup(dev);
-		return -EINVAL;
+	master_priv->sarea = drm_getsarea(dev);
+	if (master_priv->sarea) {
+		master_priv->sarea_priv = (drm_i915_sarea_t *)
+		    ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
+	} else {
+		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
 	}
 
-	dev_priv->sarea_priv = (drm_i915_sarea_t *)
-	    ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
-
 	if (init->ring_size != 0) {
 		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
@@ -210,11 +235,12 @@
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
 	dev_priv->current_page = 0;
-	dev_priv->sarea_priv->pf_current_page = 0;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->pf_current_page = 0;
 
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
-	dev_priv->allow_batchbuffer = 1;
+	dev_priv->dri1.allow_batchbuffer = 1;
 
 	return 0;
 }
@@ -226,7 +252,7 @@
 
 	DRM_DEBUG("\n");
 
-	if (ring->map.handle == NULL) {
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
@@ -254,6 +280,9 @@
 	drm_i915_init_t *init = data;
 	int retcode = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	switch (init->func) {
 	case I915_INIT_DMA:
 		retcode = i915_initialize(dev, init);
@@ -447,11 +476,12 @@
 static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
 	if (++dev_priv->counter > 0x7FFFFFFFUL)
 		dev_priv->counter = 0;
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
@@ -502,6 +532,9 @@
 	int nbox = batch->num_cliprects;
 	int i, count, ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment\n");
 		return -EINVAL;
@@ -553,15 +586,17 @@
 static int i915_dispatch_flip(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv =
+		dev->primary->master->driver_priv;
 	int ret;
 
-	if (!dev_priv->sarea_priv)
+	if (!master_priv->sarea_priv)
 		return -EINVAL;
 
 	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
 		  __func__,
 		  dev_priv->current_page,
-		  dev_priv->sarea_priv->pf_current_page);
+		  master_priv->sarea_priv->pf_current_page);
 
 	i915_kernel_lost_context(dev);
 
@@ -587,10 +622,7 @@
 
 	ADVANCE_LP_RING();
 
-	if (++dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->counter = 0;
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
@@ -600,7 +632,7 @@
 		ADVANCE_LP_RING();
 	}
 
-	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
 }
 
@@ -618,6 +650,9 @@
 {
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	DRM_LOCK(dev);
@@ -631,17 +666,18 @@
 			    struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	drm_i915_sarea_t *sarea_priv;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    master_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
 	struct drm_clip_rect *cliprects;
 	size_t cliplen;
 	int ret;
 
-	if (!dev_priv->allow_batchbuffer) {
+	if (!dev_priv->dri1.allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
 		return -EINVAL;
 	}
-	DRM_UNLOCK(dev);
 
 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
 		  batch->start, batch->used, batch->num_cliprects);
@@ -656,10 +692,8 @@
 
 		ret = -copyin(batch->cliprects, cliprects,
 		    batch->num_cliprects * sizeof(struct drm_clip_rect));
-		if (ret != 0) {
-			DRM_LOCK(dev);
+		if (ret != 0)
 			goto fail_free;
-		}
 	} else
 		cliprects = NULL;
 
@@ -666,8 +700,8 @@
 	DRM_LOCK(dev);
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+	DRM_UNLOCK(dev);
 
-	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
@@ -680,12 +714,17 @@
 			  struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	drm_i915_sarea_t *sarea_priv;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    master_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
 	struct drm_clip_rect *cliprects = NULL;
 	void *batch_data;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
@@ -692,15 +731,11 @@
 	if (cmdbuf->num_cliprects < 0)
 		return -EINVAL;
 
-	DRM_UNLOCK(dev);
-
 	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
 
 	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
-	if (ret != 0) {
-		DRM_LOCK(dev);
+	if (ret != 0)
 		goto fail_batch_free;
-	}
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = malloc(cmdbuf->num_cliprects *
@@ -708,21 +743,19 @@
 		    M_WAITOK | M_ZERO);
 		ret = -copyin(cmdbuf->cliprects, cliprects,
 		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
-		if (ret != 0) {
-			DRM_LOCK(dev);
+		if (ret != 0)
 			goto fail_clip_free;
-		}
 	}
 
 	DRM_LOCK(dev);
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+	DRM_UNLOCK(dev);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 		goto fail_clip_free;
 	}
 
-	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
@@ -733,16 +766,184 @@
 	return ret;
 }
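
A convention worth calling out in the two ioctls above: FreeBSD's
copyin() returns a positive errno, while this driver keeps Linux-style
negative codes internally, hence the negation at each call site. The
hunks also stop re-taking DRM_LOCK on the failure paths because the
user copies now happen before the lock is acquired at all:

	/* user_ptr/kbuf/len are placeholders for the ioctl's arguments. */
	ret = -copyin(user_ptr, kbuf, len);	/* 0 on success, else -EFAULT */
	if (ret != 0)
		goto fail_free;			/* still unlocked, just free */
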
 
+static int i915_emit_irq(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+	i915_kernel_lost_context(dev);
+
+	DRM_DEBUG("i915: emit_irq\n");
+
+	dev_priv->counter++;
+	if (dev_priv->counter > 0x7FFFFFFFUL)
+		dev_priv->counter = 1;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
+
+	return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	int ret;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+		  READ_BREADCRUMB(dev_priv));
+
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		if (master_priv->sarea_priv)
+			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+		return 0;
+	}
+
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+	ret = 0;
+	mtx_lock(&dev_priv->irq_lock);
+	if (ring->irq_get(ring)) {
+		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
+			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
+			    "915wtq", 3 * hz);
+			if (ret == -ERESTART)
+				ret = -ERESTARTSYS;
+		}
+		ring->irq_put(ring);
+		mtx_unlock(&dev_priv->irq_lock);
+	} else {
+		mtx_unlock(&dev_priv->irq_lock);
+		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
+		     3000, 1, "915wir"))
+			ret = -EBUSY;
+	}
+
+	if (ret == -EBUSY) {
+		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+	}
+
+	return ret;
+}
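
The wait side brackets an msleep() loop with ring->irq_get()/irq_put()
and relies on msleep() atomically releasing irq_lock while asleep and
reacquiring it before returning; PCATCH lets signals interrupt the
wait, and FreeBSD's ERESTART is translated to the -ERESTARTSYS the
shared driver code expects. When the ring cannot deliver user
interrupts it falls back to polling. The generic shape, with
placeholder names:

	int error = 0;

	mtx_lock(&sc->lock);
	if (enable_irq(sc)) {
		while (error == 0 && !done(sc))
			error = -msleep(sc, &sc->lock, PCATCH, "wait", 3 * hz);
		disable_irq(sc);
		mtx_unlock(&sc->lock);
	} else {
		mtx_unlock(&sc->lock);
		error = poll_until(done, sc);	/* no IRQs available */
	}
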
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_emit_t *emit = data;
+	int result;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_LOCK(dev);
+	result = i915_emit_irq(dev);
+	DRM_UNLOCK(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_wait_t *irqwait = data;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+	return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	/* The delayed swap mechanism was fundamentally racy, and has been
+	 * removed.  The model was that the client requested a delayed flip/swap
+	 * from the kernel, then waited for vblank before continuing to perform
+	 * rendering.  The problem was that the kernel might wake the client
+	 * up before it dispatched the vblank swap (since the lock has to be
+	 * held while touching the ringbuffer), in which case the client would
+	 * clear and start the next frame before the swap occurred, and
+	 * flicker would occur in addition to likely missing the vblank.
+	 *
+	 * In the absence of this ioctl, userland falls back to a correct path
+	 * of waiting for a vblank, then dispatching the swap on its own.
+	 * Context switching to userland and back is plenty fast enough for
+	 * meeting the requirements of vblank swapping.
+	 */
+	return -EINVAL;
+}
+
 static int i915_flip_bufs(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	DRM_DEBUG("%s\n", __func__);
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
+	DRM_LOCK(dev);
 	ret = i915_dispatch_flip(dev);
+	DRM_UNLOCK(dev);
 
 	return ret;
 }
@@ -764,7 +965,7 @@
 		value = dev->irq_enabled ? 1 : 0;
 		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
-		value = dev_priv->allow_batchbuffer ? 1 : 0;
+		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
 		break;
 	case I915_PARAM_LAST_DISPATCH:
 		value = READ_BREADCRUMB(dev_priv);
@@ -788,10 +989,10 @@
 		value = 1;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = HAS_BSD(dev);
+		value = intel_ring_initialized(&dev_priv->rings[VCS]);
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = HAS_BLT(dev);
+		value = intel_ring_initialized(&dev_priv->rings[BCS]);
 		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
@@ -811,6 +1012,9 @@
 	case I915_PARAM_HAS_LLC:
 		value = HAS_LLC(dev);
 		break;
+	case I915_PARAM_HAS_ALIASING_PPGTT:
+		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -840,10 +1044,9 @@
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
 		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-		dev_priv->tex_lru_log_granularity = param->value;
 		break;
 	case I915_SETPARAM_ALLOW_BATCHBUFFER:
-		dev_priv->allow_batchbuffer = param->value;
+		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
 		break;
 	case I915_SETPARAM_NUM_USED_FENCES:
 		if (param->value > dev_priv->num_fence_regs ||
@@ -867,6 +1070,9 @@
 	drm_i915_hws_addr_t *hws = data;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
 
@@ -884,14 +1090,10 @@
 	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
 	    hws->addr & (0x1ffff<<12);
 
-	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
-	dev_priv->hws_map.size = 4*1024;
-	dev_priv->hws_map.type = 0;
-	dev_priv->hws_map.flags = 0;
-	dev_priv->hws_map.mtrr = 0;
-
-	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
-	if (dev_priv->hws_map.virtual == NULL) {
+	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
+	    dev->agp->base + hws->addr, PAGE_SIZE,
+	    VM_MEMATTR_WRITE_COMBINING);
+	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
 		i915_dma_cleanup(dev);
 		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
@@ -898,10 +1100,8 @@
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr = dev_priv->hw_status_page =
-	    dev_priv->hws_map.virtual;
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
 			dev_priv->status_gfx_addr);
@@ -909,91 +1109,7 @@
 	return 0;
 }
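
Mapping the G33-style status page now goes straight through the pmap
KPI instead of a drm_local_map: pmap_mapdev_attr() with
VM_MEMATTR_WRITE_COMBINING maps the aperture page, and the teardown in
i915_free_hws() (earlier in this file) unmaps it with pmap_unmapdev().
The pairing, with an illustrative physical address:

	/* Map one device page write-combined; unmap with the same size. */
	void *va = pmap_mapdev_attr(paddr, PAGE_SIZE, VM_MEMATTR_WRITE_COMBINING);
	if (va == NULL)
		return (-ENOMEM);
	/* ... read/write the page ... */
	pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
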
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-	if (i915_enable_ppgtt >= 0)
-		return i915_enable_ppgtt;
-
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
-		return false;
-
-	return true;
-}
-
 static int
-i915_load_gem_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long prealloc_size, gtt_size, mappable_size;
-	int ret;
-
-	prealloc_size = dev_priv->mm.gtt.stolen_size;
-	gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
-	mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
-
-	/* Basic memrange allocator for stolen space */
-	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
-	DRM_LOCK(dev);
-	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
-		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-		/* For paranoia keep the guard page in between. */
-		gtt_size -= PAGE_SIZE;
-
-		i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
-		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret) {
-			DRM_UNLOCK(dev);
-			return ret;
-		}
-	} else {
-		/* Let GEM Manage all of the aperture.
-		 *
-		 * However, leave one page at the end still bound to the scratch
-		 * page.  There are a number of places where the hardware
-		 * apparently prefetches past the end of the object, and we've
-		 * seen multiple hangs with the GPU head pointer stuck in a
-		 * batchbuffer bound at the last page of the aperture.  One page
-		 * should be enough to keep any prefetching inside of the
-		 * aperture.
-		 */
-		i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
-	}
-
-	ret = i915_gem_init_hw(dev);
-	DRM_UNLOCK(dev);
-	if (ret != 0) {
-		i915_gem_cleanup_aliasing_ppgtt(dev);
-		return (ret);
-	}
-
-#if 0
-	/* Try to set up FBC with a reasonable compressed buffer size */
-	if (I915_HAS_FBC(dev) && i915_powersave) {
-		int cfb_size;
-
-		/* Leave 1M for line length buffer & misc. */
-
-		/* Try to get a 32M buffer... */
-		if (prealloc_size > (36*1024*1024))
-			cfb_size = 32*1024*1024;
-		else /* fall back to 7/8 of the stolen space */
-			cfb_size = prealloc_size * 7 / 8;
-		i915_setup_compression(dev, cfb_size);
-	}
-#endif
-
-	/* Allow hardware batchbuffers unless told otherwise. */
-	dev_priv->allow_batchbuffer = 1;
-	return 0;
-}
-
-static int
 i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1007,15 +1123,18 @@
 	intel_register_dsm_handler();
 #endif
 
-	/* IIR "flip pending" bit means done if this bit is set */
-	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
-		dev_priv->flip_pending_is_done = true;
+	/* Initialise stolen first so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto cleanup_vga_switcheroo;
 
 	intel_modeset_init(dev);
 
-	ret = i915_load_gem_init(dev);
+	ret = i915_gem_init(dev);
 	if (ret != 0)
-		goto cleanup_gem;
+		goto cleanup_gem_stolen;
 
 	intel_modeset_gem_init(dev);
 
@@ -1041,9 +1160,37 @@
 	i915_gem_cleanup_ringbuffer(dev);
 	DRM_UNLOCK(dev);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_gem_stolen:
+	i915_gem_cleanup_stolen(dev);
+cleanup_vga_switcheroo:
 	return (ret);
 }
 
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_i915_master_private *master_priv;
+
+	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
+	    M_NOWAIT | M_ZERO);
+	if (!master_priv)
+		return -ENOMEM;
+
+	master->driver_priv = master_priv;
+	return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_i915_master_private *master_priv = master->driver_priv;
+
+	if (!master_priv)
+		return;
+
+	free(master_priv, DRM_MEM_DMA);
+
+	master->driver_priv = NULL;
+}
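
SAREA bookkeeping moves from dev_priv into this per-master private
structure, so KMS/DRI2 clients, which have no SAREA at all, are
handled by a NULL check instead of a hard failure. Every converted
call site follows the same guarded pattern:

	/* Guarded SAREA access, as in i915_update_dri1_breadcrumb() above. */
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master != NULL) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv != NULL)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
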
+
 static int
 i915_get_bridge_dev(struct drm_device *dev)
 {
@@ -1094,10 +1241,10 @@
 #endif
 
 	/* Get some space for it */
-	vga = device_get_parent(dev->device);
+	vga = device_get_parent(dev->dev);
 	dev_priv->mch_res_rid = 0x100;
 	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
-	    dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
+	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
 	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
 	if (dev_priv->mch_res == NULL) {
 		DRM_ERROR("failed mchbar resource alloc\n");
@@ -1184,10 +1331,10 @@
 	}
 
 	if (dev_priv->mch_res != NULL) {
-		vga = device_get_parent(dev->device);
-		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
+		vga = device_get_parent(dev->dev);
+		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
 		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
-		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
+		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
 		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
 		dev_priv->mch_res = NULL;
 	}
@@ -1197,9 +1344,17 @@
 i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_device_info *info;
 	unsigned long base, size;
 	int mmio_bar, ret;
 
+	info = i915_get_device_id(dev->pci_device);
+
+	/* Refuse to load on gen6+ without kms enabled. */
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+
 	ret = 0;
 
 	/* i915 has 4 more counters */
@@ -1211,12 +1366,10 @@
 
 	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
 	    M_ZERO | M_WAITOK);
-	if (dev_priv == NULL)
-		return -ENOMEM;
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
-	dev_priv->info = i915_get_device_id(dev->pci_device);
+	dev_priv->info = info;
 
 	if (i915_get_bridge_dev(dev)) {
 		free(dev_priv, DRM_MEM_DRIVER);
@@ -1231,6 +1384,11 @@
 
 	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
 	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
+	if (ret != 0) {
+		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
+		free(dev_priv, DRM_MEM_DRIVER);
+		return (ret);
+	}
 
 	dev_priv->tq = taskqueue_create("915", M_WAITOK,
 	    taskqueue_thread_enqueue, &dev_priv->tq);
@@ -1239,8 +1397,8 @@
 	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
 	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
 	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
+	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
 
-	dev_priv->has_gem = 1;
 	intel_irq_init(dev);
 
 	intel_setup_mchbar(dev);
@@ -1251,25 +1409,33 @@
 
 	i915_gem_load(dev);
 
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 *
+	 * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		drm_pci_enable_msi(dev);
+
 	/* Init HWS */
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
 		if (ret != 0) {
 			drm_rmmap(dev, dev_priv->mmio_map);
-			drm_free(dev_priv, sizeof(struct drm_i915_private),
-			    DRM_MEM_DRIVER);
+			free(dev_priv, DRM_MEM_DRIVER);
 			return ret;
 		}
 	}
 
-	if (IS_PINEVIEW(dev))
-		i915_pineview_get_mem_freq(dev);
-	else if (IS_GEN5(dev))
-		i915_ironlake_get_mem_freq(dev);
-
 	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
 
-	if (IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
 	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
@@ -1286,9 +1452,7 @@
 	intel_detect_pch(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_UNLOCK(dev);
 		ret = i915_load_modeset_init(dev);
-		DRM_LOCK(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
 			goto out_gem_unload;
@@ -1295,6 +1459,8 @@
 		}
 	}
 
+	pci_enable_busmaster(dev->dev);
+
 	intel_opregion_init(dev);
 
 	callout_init(&dev_priv->hangcheck_timer, 1);
@@ -1301,41 +1467,34 @@
 	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
 	    i915_hangcheck_elapsed, dev);
 
-	if (IS_GEN5(dev)) {
-		mtx_lock(&mchdev_lock);
-		i915_mch_dev = dev_priv;
-		dev_priv->mchdev_lock = &mchdev_lock;
-		mtx_unlock(&mchdev_lock);
-	}
+	if (IS_GEN5(dev))
+		intel_gpu_ips_init(dev_priv);
 
 	return (0);
 
 out_gem_unload:
 	/* XXXKIB */
-	(void) i915_driver_unload_int(dev, true);
+	(void) i915_driver_unload(dev);
 	return (ret);
 }
 
-static int
-i915_driver_unload_int(struct drm_device *dev, bool locked)
+int
+i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!locked)
-		DRM_LOCK(dev);
-	ret = i915_gpu_idle(dev, true);
+	DRM_LOCK(dev);
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	if (!locked)
-		DRM_UNLOCK(dev);
+	i915_gem_retire_requests(dev);
+	DRM_UNLOCK(dev);
 
 	i915_free_hws(dev);
 
 	intel_teardown_mchbar(dev);
 
-	if (locked)
-		DRM_UNLOCK(dev);
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_fbdev_fini(dev);
 		intel_modeset_cleanup(dev);
@@ -1347,18 +1506,17 @@
 
 	i915_destroy_error_state(dev);
 
+	if (dev->msi_enabled)
+		drm_pci_disable_msi(dev);
+
 	intel_opregion_fini(dev);
 
-	if (locked)
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		DRM_LOCK(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (!locked)
-			DRM_LOCK(dev);
 		i915_gem_free_all_phys_object(dev);
 		i915_gem_cleanup_ringbuffer(dev);
-		if (!locked)
-			DRM_UNLOCK(dev);
+		i915_gem_context_fini(dev);
+		DRM_UNLOCK(dev);
 		i915_gem_cleanup_aliasing_ppgtt(dev);
 #if 1
 		KIB_NOTYET();
@@ -1381,27 +1539,20 @@
 	if (dev_priv->tq != NULL)
 		taskqueue_free(dev_priv->tq);
 
-	bus_generic_detach(dev->device);
+	bus_generic_detach(dev->dev);
 	drm_rmmap(dev, dev_priv->mmio_map);
 	intel_teardown_gmbus(dev);
 
+	mtx_destroy(&dev_priv->dpio_lock);
 	mtx_destroy(&dev_priv->error_lock);
 	mtx_destroy(&dev_priv->error_completion_lock);
 	mtx_destroy(&dev_priv->rps_lock);
-	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
-	    DRM_MEM_DRIVER);
+	free(dev->dev_private, DRM_MEM_DRIVER);
 
 	return (0);
 }
 
 int
-i915_driver_unload(struct drm_device *dev)
-{
-
-	return (i915_driver_unload_int(dev, true));
-}
-
-int
 i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_i915_file_private *i915_file_priv;
@@ -1413,6 +1564,8 @@
 	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
 	file_priv->driver_priv = i915_file_priv;
 
+	drm_gem_names_init(&i915_file_priv->context_idr);
+
 	return (0);
 }
 
@@ -1437,6 +1590,7 @@
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
 
+	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
 }
 
@@ -1445,7 +1599,7 @@
 	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 
 	mtx_destroy(&i915_file_priv->mm.lck);
-	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+	free(i915_file_priv, DRM_MEM_FILES);
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
@@ -1462,28 +1616,28 @@
 	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
 	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
-	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
@@ -1491,17 +1645,23 @@
 	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
 };
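
Most GEM entries gain DRM_UNLOCKED, telling the DRM core not to take
the legacy global lock around the handler (GEM does its own locking);
the remaining DRI1 ioctls keep the locked path, and the old
i915_vblank_pipe_set handler is replaced with drm_noop. Each row is
simply command number, handler, and flag mask:

	/* command, handler, permission/locking flags */
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
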
 
 #ifdef COMPAT_FREEBSD32
-extern drm_ioctl_desc_t i915_compat_ioctls[];
+extern struct drm_ioctl_desc i915_compat_ioctls[];
 extern int i915_compat_ioctls_nr;
 #endif
 
-struct drm_driver_info i915_driver_info = {
-	.driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
-	    DRIVER_GEM /*| DRIVER_MODESET*/,
+struct drm_driver i915_driver_info = {
+	/*
+	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
+	 * Linux.
+	 */
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
 
 	.buf_priv_size	= sizeof(drm_i915_private_t),
 	.load		= i915_driver_load,
@@ -1511,6 +1671,8 @@
 	.lastclose	= i915_driver_lastclose,
 	.postclose	= i915_driver_postclose,
 	.device_is_agp	= i915_driver_device_is_agp,
+	.master_create	= i915_master_create,
+	.master_destroy	= i915_master_destroy,
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_pager_ops	= &i915_gem_pager_ops,
@@ -1523,9 +1685,9 @@
 	.ioctls		= i915_ioctls,
 #ifdef COMPAT_FREEBSD32
 	.compat_ioctls  = i915_compat_ioctls,
-	.compat_ioctls_nr = &i915_compat_ioctls_nr,
+	.num_compat_ioctls = &i915_compat_ioctls_nr,
 #endif
-	.max_ioctl	= DRM_ARRAY_SIZE(i915_ioctls),
+	.num_ioctls	= ARRAY_SIZE(i915_ioctls),
 
 	.name		= DRIVER_NAME,
 	.desc		= DRIVER_DESC,
@@ -1535,550 +1697,12 @@
 	.patchlevel	= DRIVER_PATCHLEVEL,
 };
 
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * built-in.
- *
- * \param dev   The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i9x5 is AGP.
+/*
+ * This is really ugly: because old userspace abused the Linux AGP interface
+ * to manage the GTT, we need to claim that all Intel devices are AGP.
+ * Otherwise the drm core refuses to initialize the AGP support code.
  */
 int i915_driver_device_is_agp(struct drm_device * dev)
 {
 	return 1;
 }
-
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 tmp;
-
-	tmp = I915_READ(CLKCFG);
-
-	switch (tmp & CLKCFG_FSB_MASK) {
-	case CLKCFG_FSB_533:
-		dev_priv->fsb_freq = 533; /* 133*4 */
-		break;
-	case CLKCFG_FSB_800:
-		dev_priv->fsb_freq = 800; /* 200*4 */
-		break;
-	case CLKCFG_FSB_667:
-		dev_priv->fsb_freq =  667; /* 167*4 */
-		break;
-	case CLKCFG_FSB_400:
-		dev_priv->fsb_freq = 400; /* 100*4 */
-		break;
-	}
-
-	switch (tmp & CLKCFG_MEM_MASK) {
-	case CLKCFG_MEM_533:
-		dev_priv->mem_freq = 533;
-		break;
-	case CLKCFG_MEM_667:
-		dev_priv->mem_freq = 667;
-		break;
-	case CLKCFG_MEM_800:
-		dev_priv->mem_freq = 800;
-		break;
-	}
-
-	/* detect pineview DDR3 setting */
-	tmp = I915_READ(CSHRDDR3CTL);
-	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 ddrpll, csipll;
-
-	ddrpll = I915_READ16(DDRMPLL1);
-	csipll = I915_READ16(CSIPLL0);
-
-	switch (ddrpll & 0xff) {
-	case 0xc:
-		dev_priv->mem_freq = 800;
-		break;
-	case 0x10:
-		dev_priv->mem_freq = 1066;
-		break;
-	case 0x14:
-		dev_priv->mem_freq = 1333;
-		break;
-	case 0x18:
-		dev_priv->mem_freq = 1600;
-		break;
-	default:
-		DRM_DEBUG("unknown memory frequency 0x%02x\n",
-				 ddrpll & 0xff);
-		dev_priv->mem_freq = 0;
-		break;
-	}
-
-	dev_priv->r_t = dev_priv->mem_freq;
-
-	switch (csipll & 0x3ff) {
-	case 0x00c:
-		dev_priv->fsb_freq = 3200;
-		break;
-	case 0x00e:
-		dev_priv->fsb_freq = 3733;
-		break;
-	case 0x010:
-		dev_priv->fsb_freq = 4266;
-		break;
-	case 0x012:
-		dev_priv->fsb_freq = 4800;
-		break;
-	case 0x014:
-		dev_priv->fsb_freq = 5333;
-		break;
-	case 0x016:
-		dev_priv->fsb_freq = 5866;
-		break;
-	case 0x018:
-		dev_priv->fsb_freq = 6400;
-		break;
-	default:
-		DRM_DEBUG("unknown fsb frequency 0x%04x\n",
-				 csipll & 0x3ff);
-		dev_priv->fsb_freq = 0;
-		break;
-	}
-
-	if (dev_priv->fsb_freq == 3200) {
-		dev_priv->c_m = 0;
-	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-		dev_priv->c_m = 1;
-	} else {
-		dev_priv->c_m = 2;
-	}
-}
-
-static const struct cparams {
-	u16 i;
-	u16 t;
-	u16 m;
-	u16 c;
-} cparams[] = {
-	{ 1, 1333, 301, 28664 },
-	{ 1, 1066, 294, 24460 },
-	{ 1, 800, 294, 25192 },
-	{ 0, 1333, 276, 27605 },
-	{ 0, 1066, 276, 27605 },
-	{ 0, 800, 231, 23784 },
-};
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
-	u64 total_count, diff, ret;
-	u32 count1, count2, count3, m = 0, c = 0;
-	unsigned long now = jiffies_to_msecs(jiffies), diff1;
-	int i;
-
-	diff1 = now - dev_priv->last_time1;
-	/*
-	 * sysctl(8) reads the value of sysctl twice in rapid
-	 * succession.  There is high chance that it happens in the
-	 * same timer tick.  Use the cached value to not divide by
-	 * zero and give the hw a chance to gather more samples.
-	 */
-	if (diff1 <= 10)
-		return (dev_priv->chipset_power);
-
-	count1 = I915_READ(DMIEC);
-	count2 = I915_READ(DDREC);
-	count3 = I915_READ(CSIEC);
-
-	total_count = count1 + count2 + count3;
-
-	/* FIXME: handle per-counter overflow */
-	if (total_count < dev_priv->last_count1) {
-		diff = ~0UL - dev_priv->last_count1;
-		diff += total_count;
-	} else {
-		diff = total_count - dev_priv->last_count1;
-	}
-
-	for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
-		if (cparams[i].i == dev_priv->c_m &&
-		    cparams[i].t == dev_priv->r_t) {
-			m = cparams[i].m;
-			c = cparams[i].c;
-			break;
-		}
-	}
-
-	diff = diff / diff1;
-	ret = ((m * diff) + c);
-	ret = ret / 10;
-
-	dev_priv->last_count1 = total_count;
-	dev_priv->last_time1 = now;
-
-	dev_priv->chipset_power = ret;
-	return (ret);
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
-{
-	unsigned long m, x, b;
-	u32 tsfs;
-
-	tsfs = I915_READ(TSFS);
-
-	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
-	x = I915_READ8(I915_TR1);
-
-	b = tsfs & TSFS_INTR_MASK;
-
-	return ((m * x) / 127) - b;
-}
-
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
-	static const struct v_table {
-		u16 vd; /* in .1 mil */
-		u16 vm; /* in .1 mil */
-	} v_table[] = {
-		{ 0, 0, },
-		{ 375, 0, },
-		{ 500, 0, },
-		{ 625, 0, },
-		{ 750, 0, },
-		{ 875, 0, },
-		{ 1000, 0, },
-		{ 1125, 0, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4125, 3000, },
-		{ 4250, 3125, },
-		{ 4375, 3250, },
-		{ 4500, 3375, },
-		{ 4625, 3500, },
-		{ 4750, 3625, },
-		{ 4875, 3750, },
-		{ 5000, 3875, },
-		{ 5125, 4000, },
-		{ 5250, 4125, },
-		{ 5375, 4250, },
-		{ 5500, 4375, },
-		{ 5625, 4500, },
-		{ 5750, 4625, },
-		{ 5875, 4750, },
-		{ 6000, 4875, },
-		{ 6125, 5000, },
-		{ 6250, 5125, },
-		{ 6375, 5250, },
-		{ 6500, 5375, },
-		{ 6625, 5500, },
-		{ 6750, 5625, },
-		{ 6875, 5750, },
-		{ 7000, 5875, },
-		{ 7125, 6000, },
-		{ 7250, 6125, },
-		{ 7375, 6250, },
-		{ 7500, 6375, },
-		{ 7625, 6500, },
-		{ 7750, 6625, },
-		{ 7875, 6750, },
-		{ 8000, 6875, },
-		{ 8125, 7000, },
-		{ 8250, 7125, },
-		{ 8375, 7250, },
-		{ 8500, 7375, },
-		{ 8625, 7500, },
-		{ 8750, 7625, },
-		{ 8875, 7750, },
-		{ 9000, 7875, },
-		{ 9125, 8000, },
-		{ 9250, 8125, },
-		{ 9375, 8250, },
-		{ 9500, 8375, },
-		{ 9625, 8500, },
-		{ 9750, 8625, },
-		{ 9875, 8750, },
-		{ 10000, 8875, },
-		{ 10125, 9000, },
-		{ 10250, 9125, },
-		{ 10375, 9250, },
-		{ 10500, 9375, },
-		{ 10625, 9500, },
-		{ 10750, 9625, },
-		{ 10875, 9750, },
-		{ 11000, 9875, },
-		{ 11125, 10000, },
-		{ 11250, 10125, },
-		{ 11375, 10250, },
-		{ 11500, 10375, },
-		{ 11625, 10500, },
-		{ 11750, 10625, },
-		{ 11875, 10750, },
-		{ 12000, 10875, },
-		{ 12125, 11000, },
-		{ 12250, 11125, },
-		{ 12375, 11250, },
-		{ 12500, 11375, },
-		{ 12625, 11500, },
-		{ 12750, 11625, },
-		{ 12875, 11750, },
-		{ 13000, 11875, },
-		{ 13125, 12000, },
-		{ 13250, 12125, },
-		{ 13375, 12250, },
-		{ 13500, 12375, },
-		{ 13625, 12500, },
-		{ 13750, 12625, },
-		{ 13875, 12750, },
-		{ 14000, 12875, },
-		{ 14125, 13000, },
-		{ 14250, 13125, },
-		{ 14375, 13250, },
-		{ 14500, 13375, },
-		{ 14625, 13500, },
-		{ 14750, 13625, },
-		{ 14875, 13750, },
-		{ 15000, 13875, },
-		{ 15125, 14000, },
-		{ 15250, 14125, },
-		{ 15375, 14250, },
-		{ 15500, 14375, },
-		{ 15625, 14500, },
-		{ 15750, 14625, },
-		{ 15875, 14750, },
-		{ 16000, 14875, },
-		{ 16125, 15000, },
-	};
-	if (dev_priv->info->is_mobile)
-		return v_table[pxvid].vm;
-	else
-		return v_table[pxvid].vd;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
-	struct timespec now, diff1;
-	u64 diff;
-	unsigned long diffms;
-	u32 count;
-
-	if (dev_priv->info->gen != 5)
-		return;
-
-	nanotime(&now);
-	diff1 = now;
-	timespecsub(&diff1, &dev_priv->last_time2);
-
-	/* Don't divide by 0 */
-	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
-	if (!diffms)
-		return;
-
-	count = I915_READ(GFXEC);
-
-	if (count < dev_priv->last_count2) {
-		diff = ~0UL - dev_priv->last_count2;
-		diff += count;
-	} else {
-		diff = count - dev_priv->last_count2;
-	}
-
-	dev_priv->last_count2 = count;
-	dev_priv->last_time2 = now;
-
-	/* More magic constants... */
-	diff = diff * 1181;
-	diff = diff / (diffms * 10);
-	dev_priv->gfx_power = diff;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
-	unsigned long t, corr, state1, corr2, state2;
-	u32 pxvid, ext_v;
-
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
-	pxvid = (pxvid >> 24) & 0x7f;
-	ext_v = pvid_to_extvid(dev_priv, pxvid);
-
-	state1 = ext_v;
-
-	t = i915_mch_val(dev_priv);
-
-	/* Revel in the empirically derived constants */
-
-	/* Correction factor in 1/100000 units */
-	if (t > 80)
-		corr = ((t * 2349) + 135940);
-	else if (t >= 50)
-		corr = ((t * 964) + 29317);
-	else /* < 50 */
-		corr = ((t * 301) + 1004);
-
-	corr = corr * ((150142 * state1) / 10000 - 78642);
-	corr /= 100000;
-	corr2 = (corr * dev_priv->corr);
-
-	state2 = (corr2 * state1) / 10000;
-	state2 /= 100; /* convert to mW */
-
-	i915_update_gfx_val(dev_priv);
-
-	return dev_priv->gfx_power + state2;
-}
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
-	struct drm_i915_private *dev_priv;
-	unsigned long chipset_val, graphics_val, ret = 0;
-
-	mtx_lock(&mchdev_lock);
-	if (!i915_mch_dev)
-		goto out_unlock;
-	dev_priv = i915_mch_dev;
-
-	chipset_val = i915_chipset_val(dev_priv);
-	graphics_val = i915_gfx_val(dev_priv);
-
-	ret = chipset_val + graphics_val;
-
-out_unlock:
-	mtx_unlock(&mchdev_lock);
-
-	return ret;
-}
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
-	struct drm_i915_private *dev_priv;
-	bool ret = true;
-
-	mtx_lock(&mchdev_lock);
-	if (!i915_mch_dev) {
-		ret = false;
-		goto out_unlock;
-	}
-	dev_priv = i915_mch_dev;
-
-	if (dev_priv->max_delay > dev_priv->fmax)
-		dev_priv->max_delay--;
-
-out_unlock:
-	mtx_unlock(&mchdev_lock);
-
-	return ret;
-}
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
-	struct drm_i915_private *dev_priv;
-	bool ret = true;
-
-	mtx_lock(&mchdev_lock);
-	if (!i915_mch_dev) {
-		ret = false;
-		goto out_unlock;
-	}
-	dev_priv = i915_mch_dev;
-
-	if (dev_priv->max_delay < dev_priv->min_delay)
-		dev_priv->max_delay++;
-
-out_unlock:
-	mtx_unlock(&mchdev_lock);
-
-	return ret;
-}
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
-	struct drm_i915_private *dev_priv;
-	bool ret = false;
-
-	mtx_lock(&mchdev_lock);
-	if (!i915_mch_dev)
-		goto out_unlock;
-	dev_priv = i915_mch_dev;
-
-	ret = dev_priv->busy;
-
-out_unlock:
-	mtx_unlock(&mchdev_lock);
-
-	return ret;
-}
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
-	struct drm_i915_private *dev_priv;
-	bool ret = true;
-
-	mtx_lock(&mchdev_lock);
-	if (!i915_mch_dev) {
-		ret = false;
-		goto out_unlock;
-	}
-	dev_priv = i915_mch_dev;
-
-	dev_priv->max_delay = dev_priv->fstart;
-
-	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
-		ret = false;
-
-out_unlock:
-	mtx_unlock(&mchdev_lock);
-
-	return ret;
-}

Modified: trunk/sys/dev/drm2/i915/i915_drm.h
===================================================================
--- trunk/sys/dev/drm2/i915/i915_drm.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_drm.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_drm.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drm.h 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
@@ -72,7 +73,7 @@
 	unsigned int sarea_handle;
 } drm_i915_init_t;
 
-typedef struct drm_i915_sarea {
+typedef struct _drm_i915_sarea {
 	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
 	int last_upload;	/* last time texture was uploaded */
 	int last_enqueue;	/* last time a buffer was enqueued */
@@ -114,14 +115,14 @@
 	unsigned int rotated_tiled;
 	unsigned int rotated2_tiled;
 
-	int planeA_x;
-	int planeA_y;
-	int planeA_w;
-	int planeA_h;
-	int planeB_x;
-	int planeB_y;
-	int planeB_w;
-	int planeB_h;
+	int pipeA_x;
+	int pipeA_y;
+	int pipeA_w;
+	int pipeA_h;
+	int pipeB_x;
+	int pipeB_y;
+	int pipeB_w;
+	int pipeB_h;
 
 	/* Triple buffering */
 	drm_handle_t third_handle;
@@ -139,6 +140,16 @@
 	unsigned int depth_bo_handle;
 } drm_i915_sarea_t;
 
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
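The sarea members are renamed from planeX_* to pipeX_*, and the compat defines
above keep existing userspace source-compatible: the preprocessor rewrites the
old member names before the compiler ever sees them. A small illustration,
where get_sarea() is a hypothetical accessor:

	drm_i915_sarea_t *sarea = get_sarea();	/* hypothetical accessor */
	int w = sarea->planeA_w;	/* preprocessed to sarea->pipeA_w */

The usual cost of this trick is that the defines rewrite any identifier
spelled planeA_w, not just accesses to this struct, so headers doing this must
pick names unlikely to collide.
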
 /* Driver specific fence types and classes.
  */
 
@@ -204,6 +215,8 @@
 #define DRM_I915_GEM_EXECBUFFER2	0x29
 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a
 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -222,7 +235,6 @@
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 #define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
-#define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
@@ -248,6 +260,8 @@
 #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 
 /* Asynchronous page flipping:
  */
@@ -297,15 +311,15 @@
 
 /* Ioctl to query kernel params:
  */
-#define I915_PARAM_IRQ_ACTIVE            1
-#define I915_PARAM_ALLOW_BATCHBUFFER     2
-#define I915_PARAM_LAST_DISPATCH         3
-#define I915_PARAM_CHIPSET_ID            4
-#define I915_PARAM_HAS_GEM               5
-#define I915_PARAM_NUM_FENCES_AVAIL      6
-#define I915_PARAM_HAS_OVERLAY           7
+#define I915_PARAM_IRQ_ACTIVE		 1
+#define I915_PARAM_ALLOW_BATCHBUFFER	 2
+#define I915_PARAM_LAST_DISPATCH	 3
+#define I915_PARAM_CHIPSET_ID		 4
+#define I915_PARAM_HAS_GEM		 5
+#define I915_PARAM_NUM_FENCES_AVAIL	 6
+#define I915_PARAM_HAS_OVERLAY		 7
 #define I915_PARAM_HAS_PAGEFLIPPING	 8
-#define I915_PARAM_HAS_EXECBUF2          9
+#define I915_PARAM_HAS_EXECBUF2	 9
 #define I915_PARAM_HAS_BSD		 10
 #define I915_PARAM_HAS_BLT		 11
 #define I915_PARAM_HAS_RELAXED_FENCING	 12
@@ -313,7 +327,8 @@
 #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
 #define I915_PARAM_HAS_RELAXED_DELTA	 15
 #define I915_PARAM_HAS_GEN7_SOL_RESET	 16
-#define I915_PARAM_HAS_LLC     	 17
+#define I915_PARAM_HAS_LLC		 17
+#define I915_PARAM_HAS_ALIASING_PPGTT	 18
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -442,26 +457,6 @@
 #define I915_RELOC1_STRIDE 4
 
 
-struct drm_i915_op_arg {
-	uint64_t next;
-	uint64_t reloc_ptr;
-	int handled;
-	unsigned int pad64;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-
-};
-
-struct drm_i915_execbuffer {
-	uint64_t ops_list;
-	uint32_t num_buffers;
-	struct drm_i915_batchbuffer batch;
-	drm_context_t context; /* for lockless use in the future */
-	struct drm_fence_arg fence_arg;
-};
-
 struct drm_i915_gem_init {
 	/**
 	 * Beginning offset in the GTT to be managed by the DRM memory
@@ -702,7 +697,7 @@
 
 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
 	uint64_t flags;
-	uint64_t rsvd1;
+	uint64_t rsvd1; /* now used for context info */
 	uint64_t rsvd2;
 };
 
@@ -746,6 +741,12 @@
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
+#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
+
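The previously reserved execbuffer2 field rsvd1 now carries the hardware
context id, and the two macros above pack and unpack it. A minimal usage
sketch, assuming eb2 is otherwise filled in and ctx_id came back from the
create ioctl defined below:

	struct drm_i915_gem_execbuffer2 eb2;

	memset(&eb2, 0, sizeof(eb2));
	/* ... set buffers_ptr, buffer_count, batch fields ... */
	i915_execbuffer2_set_context_id(eb2, ctx_id);	/* lands in rsvd1 */
	/* i915_execbuffer2_get_context_id(eb2) now yields ctx_id */
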
 struct drm_i915_gem_pin {
 	/** Handle of the buffer to be pinned. */
 	uint32_t handle;
@@ -968,4 +969,15 @@
 	uint32_t flags;
 };
 
+struct drm_i915_gem_context_create {
+	/* output: id of new context */
+	uint32_t ctx_id;
+	uint32_t pad;
+};
+
+struct drm_i915_gem_context_destroy {
+	uint32_t ctx_id;
+	uint32_t pad;
+};
+
 #endif				/* _I915_DRM_H_ */
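
Taken together with the new ioctl numbers and request structs, a userspace
caller would create and tear down a context roughly as follows. This is a
sketch assuming an open DRM device fd, with error handling elided:

	struct drm_i915_gem_context_create create;
	struct drm_i915_gem_context_destroy destroy;

	memset(&create, 0, sizeof(create));
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
		uint32_t ctx_id = create.ctx_id;	/* output field */
		/* ... submit execbuffer2 work tagged with ctx_id ... */
		memset(&destroy, 0, sizeof(destroy));
		destroy.ctx_id = ctx_id;
		(void)ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
	}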

Modified: trunk/sys/dev/drm2/i915/i915_drv.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_drv.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_drv.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /* i915_drv.c -- Intel i915 driver -*- linux-c -*-
  * Created: Wed Feb 14 17:10:04 2001 by gareth at valinux.com
  */
@@ -30,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_drv.c 241091 2012-10-01 08:33:39Z mav $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drv.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -40,6 +41,8 @@
 #include <dev/drm2/drm_pciids.h>
 #include <dev/drm2/i915/intel_drv.h>
 
+#include "fb_if.h"
+
 /* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
 static drm_pci_id_list_t i915_pciidlist[] = {
 	i915_PCI_IDS
@@ -130,6 +133,7 @@
 	.gen = 5,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
+	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
@@ -137,6 +141,7 @@
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
+	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
@@ -145,6 +150,7 @@
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
+	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -154,6 +160,7 @@
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
+	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_d_info = {
@@ -162,6 +169,7 @@
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
+	.has_pch_split = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
@@ -171,8 +179,49 @@
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.has_llc = 1,
+	.has_pch_split = 1,
 };
 
+#if 0
+static const struct intel_device_info intel_valleyview_m_info = {
+	.gen = 7, .is_mobile = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 0,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+	.gen = 7,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 0,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.is_valleyview = 1,
+};
+#endif
+
+static const struct intel_device_info intel_haswell_d_info = {
+	.is_haswell = 1, .gen = 7,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.has_llc = 1,
+	.has_pch_split = 1,
+	.not_supported = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+	.is_haswell = 1, .gen = 7, .is_mobile = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.has_llc = 1,
+	.has_pch_split = 1,
+	.not_supported = 1,
+};
+
 #define INTEL_VGA_DEVICE(id, info_) {		\
 	.device = id,				\
 	.info = info_,				\
@@ -226,9 +275,18 @@
 	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
 	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
 	{0, 0}
 };
 
+static int i915_enable_unsupported;
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv;
@@ -241,13 +299,11 @@
 	pci_save_state(dev->pdev);
 #endif
 
-	DRM_LOCK(dev);
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		error = -i915_gem_idle(dev);
+		error = i915_gem_idle(dev);
 		if (error) {
-			DRM_UNLOCK(dev);
-			device_printf(dev->device,
+			device_printf(dev->dev,
 			    "GEM idle failed, resume might fail\n");
 			return (error);
 		}
@@ -260,7 +316,6 @@
 
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
-	DRM_UNLOCK(dev);
 
 	return 0;
 }
@@ -274,13 +329,13 @@
 	dev = device_get_softc(kdev);
 	if (dev == NULL || dev->dev_private == NULL) {
 		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
+		return ENODEV;
 	}
 
 	DRM_DEBUG_KMS("starting suspend\n");
 	error = i915_drm_freeze(dev);
 	if (error)
-		return (error);
+		return (-error);
 
 	error = bus_generic_suspend(kdev);
 	DRM_DEBUG_KMS("finished suspend %d\n", error);
@@ -292,9 +347,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
-	DRM_LOCK(dev);
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_LOCK(dev);
 		i915_gem_restore_gtt_mappings(dev);
+		DRM_UNLOCK(dev);
 	}
 
 	i915_restore_state(dev);
@@ -302,14 +358,16 @@
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_init_pch_refclk(dev);
+
+		DRM_LOCK(dev);
 		dev_priv->mm.suspended = 0;
 
 		error = i915_gem_init_hw(dev);
+		DRM_UNLOCK(dev);
 
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_init_pch_refclk(dev);
-
-		DRM_UNLOCK(dev);
+		intel_modeset_init_hw(dev);
 		sx_xlock(&dev->mode_config.mutex);
 		drm_mode_config_reset(dev);
 		sx_xunlock(&dev->mode_config.mutex);
@@ -319,10 +377,6 @@
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 		sx_xunlock(&dev->mode_config.mutex);
-
-		if (IS_IRONLAKE_M(dev))
-			ironlake_enable_rc6(dev);
-		DRM_LOCK(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -329,8 +383,6 @@
 
 	dev_priv->modeset_on_lid = 0;
 
-	DRM_UNLOCK(dev);
-
 	return error;
 }
 
@@ -349,9 +401,9 @@
 	pci_set_master(dev->pdev);
 #endif
 
-	ret = -i915_drm_thaw(dev);
+	ret = i915_drm_thaw(dev);
 	if (ret != 0)
-		return (ret);
+		return (-ret);
 
 	drm_kms_helper_poll_enable(dev);
 	ret = bus_generic_resume(kdev);
@@ -362,8 +414,16 @@
 static int
 i915_probe(device_t kdev)
 {
+	const struct intel_device_info *info;
+	int error;
 
-	return drm_probe(kdev, i915_pciidlist);
+	error = drm_probe_helper(kdev, i915_pciidlist);
+	if (error != 0)
+		return (-error);
+	info = i915_get_device_id(pci_get_device(kdev));
+	if (info == NULL)
+		return (ENXIO);
+	return (0);
 }
 
 int i915_modeset;
@@ -371,15 +431,31 @@
 static int
 i915_attach(device_t kdev)
 {
-	struct drm_device *dev;
 
-	dev = device_get_softc(kdev);
 	if (i915_modeset == 1)
 		i915_driver_info.driver_features |= DRIVER_MODESET;
-	dev->driver = &i915_driver_info;
-	return (drm_attach(kdev, i915_pciidlist));
+	return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
 }
 
+static struct fb_info *
+i915_fb_helper_getinfo(device_t kdev)
+{
+	struct intel_fbdev *ifbdev;
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+	struct fb_info *info;
+
+	dev = device_get_softc(kdev);
+	dev_priv = dev->dev_private;
+	ifbdev = dev_priv->fbdev;
+	if (ifbdev == NULL)
+		return (NULL);
+
+	info = ifbdev->helper.fbdev;
+
+	return (info);
+}
+
 const struct intel_device_info *
 i915_get_device_id(int device)
 {
@@ -388,6 +464,8 @@
 	for (did = &pciidlist[0]; did->device != 0; did++) {
 		if (did->device != device)
 			continue;
+		if (did->info->not_supported && !i915_enable_unsupported)
+			return (NULL);
 		return (did->info);
 	}
 	return (NULL);
@@ -399,7 +477,11 @@
 	DEVMETHOD(device_attach,	i915_attach),
 	DEVMETHOD(device_suspend,	i915_suspend),
 	DEVMETHOD(device_resume,	i915_resume),
-	DEVMETHOD(device_detach,	drm_detach),
+	DEVMETHOD(device_detach,	drm_generic_detach),
+
+	/* Framebuffer service methods */
+	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),
+
 	DEVMETHOD_END
 };
 
@@ -420,7 +502,11 @@
 
 int intel_iommu_enabled = 0;
 TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
+int intel_iommu_gfx_mapped = 0;
+TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);
 
+int i915_prefault_disable;
+TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
 int i915_semaphores = -1;
 TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
 static int i915_try_reset = 1;
@@ -435,10 +521,14 @@
 TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
 int i915_enable_rc6 = 0;
 TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
+int i915_lvds_channel_mode;
+TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
 int i915_panel_use_ssc = -1;
 TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
 int i915_panel_ignore_lid = 0;
 TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
+int i915_panel_invert_brightness;
+TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
 int i915_modeset = 1;
 TUNABLE_INT("drm.i915.modeset", &i915_modeset);
 int i915_enable_ppgtt = -1;
@@ -445,6 +535,7 @@
 TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
 int i915_enable_hangcheck = 1;
 TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
+TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
 
 #define	PCI_VENDOR_INTEL		0x8086
 #define INTEL_PCH_DEVICE_ID_MASK	0xff00
@@ -451,9 +542,9 @@
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
 
-void
-intel_detect_pch(struct drm_device *dev)
+void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv;
 	device_t pch;
@@ -465,20 +556,44 @@
 		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
 		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_IBX;
+			dev_priv->num_pch_pll = 2;
 			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
 		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_CPT;
+			dev_priv->num_pch_pll = 2;
 			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
 		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 			/* PantherPoint is CPT compatible */
 			dev_priv->pch_type = PCH_CPT;
+			dev_priv->num_pch_pll = 2;
 			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_LPT;
+			dev_priv->num_pch_pll = 0;
+			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 		} else
 			DRM_DEBUG_KMS("No PCH detected\n");
+		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
+		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
 	} else
 		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
 }
 
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+	/* Enable semaphores on SNB when IO remapping is off */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+
+	return 1;
+}
+
 void
 __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
@@ -505,7 +620,7 @@
 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
 		DELAY(10);
 
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
 	POSTING_READ(FORCEWAKE_MT);
 
 	count = 0;
@@ -548,7 +663,7 @@
 __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
 
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
@@ -586,8 +701,33 @@
 	return (ret);
 }
 
+void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+
+	/* Already awake? */
+	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+		return;
+
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+	POSTING_READ(FORCEWAKE_VLV);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+		DELAY(10);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+	/* FIXME: confirm VLV behavior with Punit folks */
+	POSTING_READ(FORCEWAKE_VLV);
+}
+
 static int
-i8xx_do_reset(struct drm_device *dev, u8 flags)
+i8xx_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int onems;
@@ -627,13 +767,14 @@
 {
 	u8 gdrst;
 
-	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
-	return (gdrst & 0x1);
+	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
 
 static int
-i965_do_reset(struct drm_device *dev, u8 flags)
+i965_do_reset(struct drm_device *dev)
 {
+	int ret;
 	u8 gdrst;
 
 	/*
@@ -641,29 +782,46 @@
 	 * well as the reset bit (GR/bit 0).  Setting the GR bit
 	 * triggers the reset; when done, the hardware will clear it.
 	 */
-	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
-	pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);
+	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
+	pci_write_config(dev->dev, I965_GDRST,
+	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);
 
-	return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
-	    "915rst"));
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
+	pci_write_config(dev->dev, I965_GDRST,
+			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);
+
+	return wait_for(i965_reset_complete(dev), 500);
 }
 
 static int
-ironlake_do_reset(struct drm_device *dev, u8 flags)
+ironlake_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv;
 	u32 gdrst;
+	int ret;
 
 	dev_priv = dev->dev_private;
 	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
-	return (_intel_wait_for(dev,
-	    (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
-	    500, 1, "915rst"));
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
 static int
-gen6_do_reset(struct drm_device *dev, u8 flags)
+gen6_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv;
 	int ret;
@@ -685,8 +843,8 @@
 
 	/* Spin waiting for the device to ack the reset request */
 	ret = _intel_wait_for(dev,
-	    (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
-	    500, 1, "915rst");
+	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
+	    500, 0, "915rst");
 
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
  	if (dev_priv->forcewake_count)
@@ -702,14 +860,44 @@
 }
 
 int
-i915_reset(struct drm_device *dev, u8 flags)
+intel_gpu_reset(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = -ENODEV;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		ret = gen6_do_reset(dev);
+		break;
+	case 5:
+		ret = ironlake_do_reset(dev);
+		break;
+	case 4:
+		ret = i965_do_reset(dev);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev);
+		break;
+	}
+
+	/* Also reset the gpu hangman. */
+	if (dev_priv->stop_rings) {
+		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int i915_reset(struct drm_device *dev)
+{
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	/*
-	 * We really should only reset the display subsystem if we actually
-	 * need to
-	 */
-	bool need_display = true;
 	int ret;
 
 	if (!i915_try_reset)
@@ -718,28 +906,16 @@
 	if (!sx_try_xlock(&dev->dev_struct_lock))
 		return (-EBUSY);
 
+	dev_priv->stop_rings = 0;
+
 	i915_gem_reset(dev);
 
 	ret = -ENODEV;
-	if (time_second - dev_priv->last_gpu_reset < 5) {
+	if (time_second - dev_priv->last_gpu_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-	} else {
-		switch (INTEL_INFO(dev)->gen) {
-		case 7:
-		case 6:
-		ret = gen6_do_reset(dev, flags);
-		break;
-	case 5:
-		ret = ironlake_do_reset(dev, flags);
-			break;
-		case 4:
-			ret = i965_do_reset(dev, flags);
-			break;
-		case 2:
-			ret = i8xx_do_reset(dev, flags);
-			break;
-		}
-	}
+	else
+		ret = intel_gpu_reset(dev);
+
 	dev_priv->last_gpu_reset = time_second;
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
@@ -749,35 +925,39 @@
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 	    !dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring;
+		int i;
+
 		dev_priv->mm.suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
-		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
-		if (HAS_BSD(dev))
-			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
-		if (HAS_BLT(dev))
-			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);
+		for_each_ring(ring, dev_priv, i)
+			ring->init(ring);
 
+		i915_gem_context_init(dev);
 		i915_gem_init_ppgtt(dev);
 
+		DRM_UNLOCK(dev);
+
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			intel_modeset_init_hw(dev);
+
 		drm_irq_uninstall(dev);
-		drm_mode_config_reset(dev);
+		drm_irq_install(dev);
+	} else
 		DRM_UNLOCK(dev);
-		drm_irq_install(dev);
-		DRM_LOCK(dev);
-	}
-	DRM_UNLOCK(dev);
 
-	if (need_display) {
-		sx_xlock(&dev->mode_config.mutex);
-		drm_helper_resume_force_mode(dev);
-		sx_xunlock(&dev->mode_config.mutex);
-	}
-
 	return (0);
 }
 
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+       (((dev_priv)->info->gen >= 6) && \
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE)) && \
+       (!IS_VALLEYVIEW((dev_priv)->dev))
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \

Modified: trunk/sys/dev/drm2/i915/i915_drv.h
===================================================================
--- trunk/sys/dev/drm2/i915/i915_drv.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_drv.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
  */
 /*
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_drv.h 239965 2012-09-01 05:35:48Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drv.h 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
@@ -66,10 +67,31 @@
 };
 #define plane_name(p) ((p) + 'A')
 
-#define	I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+enum port {
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+	I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
 
+#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
+struct intel_pch_pll {
+	int refcount; /* count of number of CRTCs sharing this PLL */
+	int active; /* count of number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	int pll_reg;
+	int fp0_reg;
+	int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
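struct intel_pch_pll models the two PCH transcoder PLLs that CRTCs share:
refcount counts the CRTCs bound to a PLL, active counts those currently
running, and on mirrors the hardware enable state. A hedged sketch of the
enable/disable discipline such bookkeeping implies; the real helpers live in
the modesetting code and differ in detail:

	/* Illustrative only: light the PLL on the 0 -> 1 active transition. */
	static void
	pch_pll_enable(struct intel_pch_pll *pll)
	{
		if (pll->active++ == 0 && !pll->on) {
			/* program/enable via pll->pll_reg, fp0_reg, fp1_reg */
			pll->on = true;
		}
	}

	static void
	pch_pll_disable(struct intel_pch_pll *pll)
	{
		if (--pll->active == 0 && pll->on) {
			/* clear the enable bit in pll->pll_reg */
			pll->on = false;
		}
	}
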
 /* Interface history:
  *
  * 1.1: Original.
@@ -115,11 +137,15 @@
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
+	void (*sanitize_pm)(struct drm_device *dev);
+	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -141,6 +167,7 @@
 
 struct intel_device_info {
 	u8 gen;
+	u8 not_supported:1;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
 	u8 is_i915g:1;
@@ -152,6 +179,9 @@
 	u8 is_broadwater:1;
 	u8 is_crestline:1;
 	u8 is_ivybridge:1;
+	u8 is_valleyview:1;
+	u8 has_pch_split:1;
+	u8 is_haswell:1;
 	u8 has_fbc:1;
 	u8 has_pipe_cxsr:1;
 	u8 has_hotplug:1;
@@ -174,6 +204,17 @@
 	vm_paddr_t scratch_page_dma_addr;
 };
 
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+	uint32_t id;
+	bool is_initialized;
+	struct drm_i915_file_private *file_priv;
+	struct intel_ring_buffer *ring;
+	struct drm_i915_gem_object *obj;
+};
+
 enum no_fbc_reason {
 	FBC_NO_OUTPUT, /* no outputs enabled to compress */
 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@@ -208,6 +249,10 @@
 };
 #define OPREGION_SIZE            (8*1024)
 
+struct drm_i915_master_private {
+	drm_local_map_t *sarea;
+	struct _drm_i915_sarea *sarea_priv;
+};
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 16
 /* 16 fences + sign bit for FENCE_REG_NONE */
@@ -216,7 +261,6 @@
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
 	struct drm_i915_gem_object *obj;
-	uint32_t setup_seqno;
 	int pin_count;
 };
 
@@ -232,10 +276,12 @@
 enum intel_pch {
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
+	PCH_LPT,	/* Lynxpoint PCH */
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -243,18 +289,17 @@
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
-	device_t *gmbus_bridge;
-	device_t *bbbus_bridge;
-	device_t *gmbus;
-	device_t *bbbus;
+	device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
+	device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
+	device_t gmbus[GMBUS_NUM_PORTS + 1];
+	device_t bbbus[GMBUS_NUM_PORTS + 1];
 	/** gmbus_sx protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct sx gmbus_sx;
+	uint32_t gpio_mmio_base;
 
-	int has_gem;
 	int relative_constants_mode;
 
-	drm_local_map_t *sarea;
 	drm_local_map_t *mmio_map;
 
 	/** gt_fifo_count and the subsequent register write are synchronized
@@ -265,7 +310,6 @@
 	/** gt_lock is also taken in irq contexts. */
 	struct mtx gt_lock;
 
-	drm_i915_sarea_t *sarea_priv;
 	/* drm_i915_ring_buffer_t ring; */
 	struct intel_ring_buffer rings[I915_NUM_RINGS];
 	uint32_t next_seqno;
@@ -275,7 +319,6 @@
 	dma_addr_t dma_status_page;
 	uint32_t counter;
 	unsigned int status_gfx_addr;
-	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
 
 	struct drm_i915_gem_object *pwrctx;
@@ -297,23 +340,23 @@
 	u32 pch_irq_mask;
 	struct mtx irq_lock;
 
+	struct mtx dpio_lock;
+
 	u32 hotplug_supported_mask;
 
-	int tex_lru_log_granularity;
-	int allow_batchbuffer;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-	int vblank_pipe;
 	int num_pipe;
+	int num_pch_pll;
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
 	int hangcheck_count;
-	uint32_t last_acthd;
-	uint32_t last_acthd_bsd;
-	uint32_t last_acthd_blt;
+	uint32_t last_acthd[I915_NUM_RINGS];
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 
+	unsigned int stop_rings;
+
 	struct intel_opregion opregion;
 
 
@@ -335,6 +378,8 @@
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
@@ -564,24 +609,10 @@
 		 */
 		struct list_head inactive_list;
 
-		/**
-		 * LRU list of objects which are not in the ringbuffer but
-		 * are still pinned in the GTT.
-		 */
-		struct list_head pinned_list;
-
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 
 		/**
-		 * List of objects currently pending being freed.
-		 *
-		 * These objects are no longer in use, but due to a signal
-		 * we were prevented from freeing them at the appointed time.
-		 */
-		struct list_head deferred_free_list;
-
-		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -647,6 +678,15 @@
 
 	const struct intel_device_info *info;
 
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct {
+		unsigned allow_batchbuffer : 1;
+		u32 *gfx_hws_cpu_addr;
+	} dri1;
+
+	/* Kernel Modesetting */
+
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
@@ -656,8 +696,9 @@
 	struct drm_crtc *plane_to_crtc_mapping[3];
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	/* wait_queue_head_t pending_flip_queue; XXXKIB */
-	bool flip_pending_is_done;
 
+	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
@@ -700,6 +741,9 @@
 
 	enum no_fbc_reason no_fbc_reason;
 
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	int cfb_plane;
@@ -713,6 +757,7 @@
 	struct task hotplug_task;
 	int error_completion;
 	struct mtx error_completion_lock;
+	/* Protected by dev->error_lock. */
 	struct drm_i915_error_state *first_error;
 	struct mtx error_lock;
 	struct callout hangcheck_timer;
@@ -723,8 +768,16 @@
 
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
+
+	bool hw_contexts_disabled;
+	uint32_t hw_context_size;
 } drm_i915_private_t;
 
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))
+
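for_each_ring() only visits rings for which intel_ring_initialized() returns
true, so callers no longer need per-generation HAS_BSD/HAS_BLT checks; the
reset path in i915_drv.c above already uses it this way:

	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->init(ring);	/* replaces the RCS/VCS/BCS cases */
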
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
@@ -795,7 +848,14 @@
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode:2;
-	unsigned int tiling_changed:1;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -822,6 +882,7 @@
 	 */
 	unsigned int fault_mappable:1;
 	unsigned int pin_mappable:1;
+	unsigned int pin_display:1;
 
 	/*
 	 * Is the GPU currently using a fence to access this buffer,
@@ -832,8 +893,10 @@
 	unsigned int cache_level:2;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
+	unsigned int has_global_gtt_mapping:1;
 
 	vm_page_t *pages;
+	int pages_pin_count;
 
 	/**
 	 * DMAR support
@@ -854,13 +917,12 @@
 	 */
 	uint32_t gtt_offset;
 
+	struct intel_ring_buffer *ring;
+
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
-	struct intel_ring_buffer *ring;
-
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
-	struct intel_ring_buffer *last_fenced_ring;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
@@ -868,12 +930,6 @@
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-	/**
-	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
-	 * flags which individual pages are valid.
-	 */
-	uint8_t *page_cpu_valid;
-
 	/** User space pin count and filp owning the pin */
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;
@@ -889,7 +945,7 @@
 	int pending_flip;
 };
 
-#define	to_intel_bo(x) member2struct(drm_i915_gem_object, base, (x))
+#define	to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base)
 
 /**
  * Request queue structure.
@@ -927,11 +983,15 @@
 		struct list_head request_list;
 		struct mtx lck;
 	} mm;
+	struct drm_gem_names context_idr;
 };
 
 struct drm_i915_error_state {
+	u_int ref;
 	u32 eir;
 	u32 pgtbl_er;
+	u32 ier;
+	bool waiting[I915_NUM_RINGS];
 	u32 pipestat[I915_MAX_PIPES];
 	u32 tail[I915_NUM_RINGS];
 	u32 head[I915_NUM_RINGS];
@@ -1010,13 +1070,16 @@
 
 extern int intel_iommu_enabled;
 extern struct drm_ioctl_desc i915_ioctls[];
-extern struct drm_driver_info i915_driver_info;
+extern struct drm_driver i915_driver_info;
 extern struct cdev_pager_ops i915_gem_pager_ops;
 extern unsigned int i915_fbpercrtc;
 extern int i915_panel_ignore_lid;
+extern int i915_panel_invert_brightness;
 extern unsigned int i915_powersave;
+extern int i915_prefault_disable;
 extern int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
+extern int i915_lvds_channel_mode;
 extern int i915_panel_use_ssc;
 extern int i915_vbt_sdvo_panel_type;
 extern int i915_enable_rc6;
@@ -1026,7 +1089,8 @@
 
 const struct intel_device_info *i915_get_device_id(int device);
 
-int i915_reset(struct drm_device *dev, u8 flags);
+int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_device *dev);
 
 /* i915_debug.c */
 int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
@@ -1033,6 +1097,9 @@
     struct sysctl_oid *top);
 void i915_sysctl_cleanup(struct drm_device *dev);
 
+extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
+extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+
 				/* i915_dma.c */
 int i915_batchbuffer(struct drm_device *dev, void *data,
     struct drm_file *file_priv);
@@ -1040,6 +1107,7 @@
     struct drm_file *file_priv);
 int i915_getparam(struct drm_device *dev, void *data,
     struct drm_file *file_priv);
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
@@ -1071,20 +1139,12 @@
 /* i915_irq.c */
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-
 extern void intel_irq_init(struct drm_device *dev);
 
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
 void intel_enable_asle(struct drm_device *dev);
 void i915_hangcheck_elapsed(void *context);
 void i915_handle_error(struct drm_device *dev, bool wedged);
+void i915_error_state_free(struct drm_i915_error_state *error);
 
 void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1145,13 +1205,15 @@
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
 
-static inline void
+static inline bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-	}
+		return true;
+	} else
+		return false;
 }
 
 static inline void
@@ -1168,36 +1230,38 @@
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
     size_t size);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-    unsigned long mappable_end, unsigned long end);
 uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
     uint32_t size, int tiling_mode);
 int i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
     bool write);
+int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+    bool write);
 int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
     u32 alignment, struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int i915_gem_flush_ring(struct intel_ring_buffer *ring,
     uint32_t invalidate_domains, uint32_t flush_domains);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+    struct intel_ring_buffer *to);
 int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 int i915_gem_idle(struct drm_device *dev);
+int i915_gem_init(struct drm_device *dev);
 int i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int i915_gpu_idle(struct drm_device *dev);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
     struct intel_ring_buffer *ring, uint32_t seqno);
 int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
     struct drm_i915_gem_request *request);
-int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined);
+int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 void i915_gem_reset(struct drm_device *dev);
-int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
-    bool do_retire);
+int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
 int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
 int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
     uint64_t *phys);
@@ -1205,6 +1269,17 @@
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
     enum i915_cache_level cache_level);
 
+/* i915_gem_context.c */
+void i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_ring_buffer *ring,
+			struct drm_file *file, int to_id);
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file);
+
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_detach_phys_object(struct drm_device *dev,
     struct drm_i915_gem_object *obj);
@@ -1222,13 +1297,18 @@
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
+    struct vm_page *m);
 
 /* i915_gem_evict.c */
 int i915_gem_evict_something(struct drm_device *dev, int min_size,
     unsigned alignment, bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
-int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
 
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
+
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
@@ -1239,13 +1319,20 @@
 extern void intel_gmbus_set_speed(device_t idev, int speed);
 extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
 extern void intel_iic_reset(struct drm_device *dev);
+static inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+    unsigned port);
 
 /* intel_opregion.c */
 int intel_opregion_setup(struct drm_device *dev);
-extern int intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
 
 /* i915_gem_gtt.c */
 int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
@@ -1256,17 +1343,20 @@
     struct drm_i915_gem_object *obj);
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+			      enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
-    enum i915_cache_level cache_level);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
+    unsigned long mappable_end, unsigned long end);
 
 /* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void ironlake_init_pch_refclk(struct drm_device *dev);
@@ -1274,12 +1364,19 @@
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
 
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
 
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
     struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct sbuf *m,
@@ -1304,12 +1401,6 @@
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	(((dev_priv)->info->gen >= 6) && \
-	 ((reg) < 0x40000) &&		 \
-	 ((reg) != FORCEWAKE))
-
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
@@ -1349,22 +1440,6 @@
 
 #define I915_VERBOSE 0
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
-
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
-
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
-		LOCK_TEST_WITH_RETURN(dev, file);			\
-} while (0)
-
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1380,10 +1455,7 @@
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
-#define I915_BREADCRUMB_INDEX		0x21
 
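/*
 * Illustrative sketch only (not part of this change): with READ_HWSP
 * and READ_BREADCRUMB removed, an equivalent status-page read looks
 * like this, assuming a CPU-visible pointer to the status page.
 */
static inline uint32_t
read_status_page(const volatile uint32_t *status_page, int reg)
{
	/* Dwords 0x20-0x3ff are available for driver-defined use. */
	return (status_page[reg]);
}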
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
 
@@ -1406,6 +1478,8 @@
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define	IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
 /* XXXKIB LEGACY */
@@ -1443,6 +1517,7 @@
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
+#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
 #define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6)
 
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
@@ -1466,10 +1541,11 @@
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
@@ -1482,6 +1558,23 @@
 	return ((int32_t)(seq1 - seq2) >= 0);
 }
 
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	/* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
+	obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
+	obj->pages_pin_count--;
+}
+
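/*
 * Illustrative usage note (not part of this change): the pin helpers
 * above bracket any region that touches obj->pages while locks may be
 * dropped, the pattern the shmem pread/pwrite paths below follow:
 *
 *	i915_gem_object_pin_pages(obj);
 *	... copy to or from the backing pages ...
 *	i915_gem_object_unpin_pages(obj);
 */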
 u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
 
 #endif

Modified: trunk/sys/dev/drm2/i915/i915_gem.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_gem.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_gem.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright © 2008 Intel Corporation
  *
@@ -52,7 +53,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_gem.c 252496 2013-07-02 04:42:32Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem.c 307672 2016-10-20 13:12:19Z kib $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -64,6 +65,11 @@
 #include <sys/sched.h>
 #include <sys/sf_buf.h>
 
+#include <vm/vm.h>
+#include <vm/vm_pageout.h>
+
+#include <machine/md_var.h>
+
 static void i915_gem_object_flush_cpu_write_domain(
     struct drm_i915_gem_object *obj);
 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
@@ -75,32 +81,57 @@
 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
     int flags);
 static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-    bool write);
-static void i915_gem_object_set_to_full_cpu_read_domain(
-    struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_cpu_read_domain_range(
-    struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size);
+static void i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj,
+    off_t start, off_t end);
+static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
+    off_t start, off_t end);
 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
-static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
+static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
+    bool *fresh);
 static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
     uint32_t flush_domains);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
-    struct drm_i915_fence_reg *reg);
 static void i915_gem_reset_fences(struct drm_device *dev);
 static void i915_gem_retire_task_handler(void *arg, int pending);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-    struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
-    uint64_t size, struct drm_file *file_priv);
 static void i915_gem_lowmem(void *arg);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+    struct drm_i915_gem_object *obj);
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+    bool interruptible);
+static int i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno);
 
 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
 long i915_gem_wired_pages_cnt;
 
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+				  enum i915_cache_level level)
+{
+	return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+		return true;
+
+	return obj->pin_display;
+}
+
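/*
 * Illustrative note (not part of this change): these two predicates
 * drive the needs_clflush decisions in the shmem pread/pwrite paths
 * below.  Reads flush when the CPU cache is not coherent with the
 * object's cache level; writes additionally flush for pin_display
 * (scanout) objects, which must never be served from a stale cache.
 */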
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
 static void
 i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
 {
@@ -131,6 +162,8 @@
 	while (dev_priv->error_completion == 0) {
 		ret = -msleep(&dev_priv->error_completion,
 		    &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
+		if (ret == -ERESTART)
+			ret = -ERESTARTSYS;
 		if (ret != 0) {
 			mtx_unlock(&dev_priv->error_completion_lock);
 			return (ret);
@@ -138,7 +171,7 @@
 	}
 	mtx_unlock(&dev_priv->error_completion_lock);
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
+	if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
 		mtx_lock(&dev_priv->error_completion_lock);
 		dev_priv->error_completion++;
 		mtx_unlock(&dev_priv->error_completion_lock);
@@ -169,50 +202,42 @@
 }
 
 
-static void
-i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void
+i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev;
 	drm_i915_private_t *dev_priv;
-	int ret;
 
 	dev = obj->base.dev;
 	dev_priv = dev->dev_private;
 
-	ret = i915_gem_object_unbind(obj);
-	if (ret == -ERESTART) {
-		list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list);
-		return;
+	CTR1(KTR_DRM, "object_destroy_tail %p", obj);
+
+	if (obj->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	obj->pin_count = 0;
+	if (i915_gem_object_unbind(obj) == -ERESTARTSYS) {
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		if (i915_gem_object_unbind(obj))
+			printf("i915_gem_free_object: unbind\n");
+
+		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	CTR1(KTR_DRM, "object_destroy_tail %p", obj);
 	drm_gem_free_mmap_offset(&obj->base);
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
-	free(obj->page_cpu_valid, DRM_I915_GEM);
 	free(obj->bit_17, DRM_I915_GEM);
 	free(obj, DRM_I915_GEM);
 }
 
-void
-i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_i915_gem_object *obj;
-	struct drm_device *dev;
-
-	obj = to_intel_bo(gem_obj);
-	dev = obj->base.dev;
-
-	while (obj->pin_count > 0)
-		i915_gem_object_unpin(obj);
-
-	if (obj->phys_obj != NULL)
-		i915_gem_detach_phys_object(dev, obj);
-
-	i915_gem_free_object_tail(obj);
-}
-
 static void
 init_ring_lists(struct intel_ring_buffer *ring)
 {
@@ -233,9 +258,7 @@
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->rings[i]);
@@ -247,16 +270,8 @@
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
 	if (IS_GEN3(dev)) {
-		u32 tmp = I915_READ(MI_ARB_STATE);
-		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
-			/*
-			 * arb state is a masked write, so set bit +
-			 * bit in mask.
-			 */
-			tmp = MI_ARB_C3_LP_WRITE_ENABLE |
-			    (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
-			I915_WRITE(MI_ARB_STATE, tmp);
-		}
+		I915_WRITE(MI_ARB_STATE,
+			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 	}
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -272,9 +287,8 @@
 		dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
-	}
+	i915_gem_reset_fences(dev);
+
 	i915_gem_detect_bit_6_swizzle(dev);
 	dev_priv->mm.interruptible = true;
 
@@ -283,41 +297,16 @@
 }
 
 int
-i915_gem_do_init(struct drm_device *dev, unsigned long start,
-    unsigned long mappable_end, unsigned long end)
-{
-	drm_i915_private_t *dev_priv;
-	unsigned long mappable;
-	int error;
-
-	dev_priv = dev->dev_private;
-	mappable = min(end, mappable_end) - start;
-
-	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
-	dev_priv->mm.gtt_start = start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
-	dev_priv->mm.gtt_end = end;
-	dev_priv->mm.gtt_total = end - start;
-	dev_priv->mm.mappable_gtt_total = mappable;
-
-	/* Take over this portion of the GTT */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
-	device_printf(dev->device,
-	    "taking over the fictitious range 0x%lx-0x%lx\n",
-	    dev->agp->base + start, dev->agp->base + start + mappable);
-	error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
-	    dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
-	return (error);
-}
-
-int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
     struct drm_file *file)
 {
 	struct drm_i915_gem_init *args;
 	drm_i915_private_t *dev_priv;
+	int error;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	dev_priv = dev->dev_private;
 	args = data;
 
@@ -327,12 +316,20 @@
 
 	if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
 		return (-EBUSY);
+
+	/* GEM with user mode setting was never supported on ilk and later. */
+	if (INTEL_INFO(dev)->gen >= 5)
+		return -ENODEV;
+
 	/*
 	 * XXXKIB. The second-time initialization should be guarded
 	 * against.
 	 */
-	return (i915_gem_do_init(dev, args->gtt_start, args->gtt_end,
-	    args->gtt_end));
+	DRM_LOCK(dev);
+	error = i915_gem_init_global_gtt(dev, args->gtt_start,
+					 args->gtt_end, args->gtt_end);
+	DRM_UNLOCK(dev);
+	return (error);
 }
 
 int
@@ -341,19 +338,28 @@
 	drm_i915_private_t *dev_priv;
 	int ret;
 
+	DRM_LOCK(dev);
+
 	dev_priv = dev->dev_private;
-	if (dev_priv->mm.suspended)
+	if (dev_priv->mm.suspended) {
+		DRM_UNLOCK(dev);
 		return (0);
+	}
 
-	ret = i915_gpu_idle(dev, true);
-	if (ret != 0)
+	ret = i915_gpu_idle(dev);
+	if (ret != 0) {
+		DRM_UNLOCK(dev);
 		return (ret);
+	}
+	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_inactive(dev, false);
-		if (ret != 0)
+		ret = i915_gem_evict_everything(dev, false);
+		if (ret != 0) {
+			DRM_UNLOCK(dev);
 			return ret;
+		}
 	}
 
 	i915_gem_reset_fences(dev);
@@ -368,6 +374,8 @@
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 
+	DRM_UNLOCK(dev);
+
 	/* Cancel the retire work handler, which should be idle now. */
 	taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
 	return (ret);
@@ -390,15 +398,15 @@
 	if (IS_GEN5(dev))
 		return;
 
 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
 	if (IS_GEN6(dev))
-		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
 	else
-		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
 }
 
-void
-i915_gem_init_ppgtt(struct drm_device *dev)
+void i915_gem_init_ppgtt(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv;
 	struct i915_hw_ppgtt *ppgtt;
@@ -426,21 +434,27 @@
 	pd_offset <<= 16;
 
 	if (INTEL_INFO(dev)->gen == 6) {
-		uint32_t ecochk = I915_READ(GAM_ECOCHK);
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
 		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
 				       ECOCHK_PPGTT_CACHE64B);
-		I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
 		/* GFX_MODE is per-ring on gen7+ */
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ring = &dev_priv->rings[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 7)
 			I915_WRITE(RING_MODE_GEN7(ring),
-				   GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
 		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -474,6 +488,7 @@
 	}
 
 	dev_priv->next_seqno = 1;
+	i915_gem_context_init(dev);
 	i915_gem_init_ppgtt(dev);
 	return (0);
 
@@ -484,6 +499,69 @@
 	return (ret);
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
+		return false;
+
+	return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
+
+	DRM_LOCK(dev);
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;
+
+		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			DRM_UNLOCK(dev);
+			return ret;
+		}
+	} else {
+		/* Let GEM manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page.  There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture.  One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_init_global_gtt(dev, 0, mappable_size,
+					 gtt_size);
+	}
+
+	ret = i915_gem_init_hw(dev);
+	DRM_UNLOCK(dev);
+	if (ret != 0) {
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		return (ret);
+	}
+
+	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->dri1.allow_batchbuffer = 1;
+	return 0;
+}
+
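/*
 * Worked example (illustrative, values assumed rather than taken from
 * this change): with the usual 512 page-directory entries of one page
 * each, the carve-out above costs 512 * 4096 bytes = 2 MiB of global
 * GTT, and in exchange the aliasing PPGTT can address
 * 512 PDEs * 1024 PTEs * 4 KiB = 2 GiB.
 */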
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
     struct drm_file *file)
@@ -496,13 +574,11 @@
 	dev_priv = dev->dev_private;
 	args = data;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return (-ENODEV);
-
 	pinned = 0;
 	DRM_LOCK(dev);
-	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		pinned += obj->gtt_space->size;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+		if (obj->pin_count)
+			pinned += obj->gtt_space->size;
 	DRM_UNLOCK(dev);
 
 	args->aper_size = dev_priv->mm.gtt_total;
@@ -515,16 +591,11 @@
 i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
      bool map_and_fenceable)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
 	int ret;
 
-	dev = obj->base.dev;
-	dev_priv = dev->dev_private;
+	if (obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)
+		return (-EBUSY);
 
-	KASSERT(obj->pin_count != DRM_I915_GEM_OBJECT_MAX_PIN_COUNT,
-	    ("Max pin count"));
-
 	if (obj->gtt_space != NULL) {
 		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
 		    (map_and_fenceable && !obj->map_and_fenceable)) {
@@ -547,47 +618,24 @@
 			return (ret);
 	}
 
-	if (obj->pin_count++ == 0 && !obj->active)
-		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+	if (!obj->has_global_gtt_mapping && map_and_fenceable)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+	obj->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
 
-#if 1
-	KIB_NOTYET();
-#else
-	WARN_ON(i915_verify_lists(dev));
-#endif
-	return (0);
+	return 0;
 }
 
 void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev;
-	drm_i915_private_t *dev_priv;
 
-	dev = obj->base.dev;
-	dev_priv = dev->dev_private;
-
-#if 1
-	KIB_NOTYET();
-#else
-	WARN_ON(i915_verify_lists(dev));
-#endif
-	
 	KASSERT(obj->pin_count != 0, ("zero pin count"));
 	KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
 
-	if (--obj->pin_count == 0) {
-		if (!obj->active)
-			list_move_tail(&obj->mm_list,
-			    &dev_priv->mm.inactive_list);
+	if (--obj->pin_count == 0)
 		obj->pin_mappable = false;
-	}
-#if 1
-	KIB_NOTYET();
-#else
-	WARN_ON(i915_verify_lists(dev));
-#endif
 }
 
 int
@@ -689,7 +737,6 @@
 {
 	struct drm_i915_gem_busy *args;
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *request;
 	int ret;
 
 	args = data;
@@ -709,13 +756,9 @@
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
 			ret = i915_gem_flush_ring(obj->ring,
 			    0, obj->base.write_domain);
-		} else if (obj->ring->outstanding_lazy_request ==
-		    obj->last_rendering_seqno) {
-			request = malloc(sizeof(*request), DRM_I915_GEM,
-			    M_WAITOK | M_ZERO);
-			ret = i915_add_request(obj->ring, NULL, request);
-			if (ret != 0)
-				free(request, DRM_I915_GEM);
+		} else {
+			ret = i915_gem_check_olr(obj->ring,
+						 obj->last_rendering_seqno);
 		}
 
 		i915_gem_retire_requests_ring(obj->ring);
@@ -740,7 +783,7 @@
 	int ret;
 
 	dev_priv = dev->dev_private;
-	if (atomic_read(&dev_priv->mm.wedged))
+	if (atomic_load_acq_int(&dev_priv->mm.wedged))
 		return (-EIO);
 
 	file_priv = file->driver_priv;
@@ -759,26 +802,7 @@
 	if (seqno == 0)
 		return (0);
 
-	ret = 0;
-	mtx_lock(&ring->irq_lock);
-	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		if (ring->irq_get(ring)) {
-			while (ret == 0 &&
-			    !(i915_seqno_passed(ring->get_seqno(ring), seqno) ||
-			    atomic_read(&dev_priv->mm.wedged)))
-				ret = -msleep(ring, &ring->irq_lock, PCATCH,
-				    "915thr", 0);
-			ring->irq_put(ring);
-			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-				ret = -EIO;
-		} else if (_intel_wait_for(dev,
-		    i915_seqno_passed(ring->get_seqno(ring), seqno) ||
-		    atomic_read(&dev_priv->mm.wedged), 3000, 0, "915rtr")) {
-			ret = -EBUSY;
-		}
-	}
-	mtx_unlock(&ring->irq_lock);
-
+	ret = __wait_seqno(ring, seqno, true);
 	if (ret == 0)
 		taskqueue_enqueue_timeout(dev_priv->tq,
 		    &dev_priv->mm.retire_task, 0);
@@ -843,11 +867,12 @@
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv;
+	struct intel_ring_buffer *ring;
 	int i;
 
 	dev_priv = dev->dev_private;
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+	for_each_ring(ring, dev_priv, i)
+		intel_cleanup_ring_buffer(ring);
 }
 
 int
@@ -855,7 +880,7 @@
     struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv;
-	int ret, i;
+	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return (0);
@@ -865,10 +890,12 @@
 		atomic_store_rel_int(&dev_priv->mm.wedged, 0);
 	}
 
+	DRM_LOCK(dev);
 	dev_priv->mm.suspended = 0;
 
 	ret = i915_gem_init_hw(dev);
 	if (ret != 0) {
+		DRM_UNLOCK(dev);
 		return (ret);
 	}
 
@@ -875,16 +902,9 @@
 	KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
 	KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
 	KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		KASSERT(list_empty(&dev_priv->rings[i].active_list),
-		    ("ring %d active list", i));
-		KASSERT(list_empty(&dev_priv->rings[i].request_list),
-		    ("ring %d request list", i));
-	}
+	DRM_UNLOCK(dev);
 
-	DRM_UNLOCK(dev);
 	ret = drm_irq_install(dev);
-	DRM_LOCK(dev);
 	if (ret)
 		goto cleanup_ringbuffer;
 
@@ -891,8 +911,10 @@
 	return (0);
 
 cleanup_ringbuffer:
+	DRM_LOCK(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	dev_priv->mm.suspended = 1;
+	DRM_UNLOCK(dev);
 
 	return (ret);
 }
@@ -925,13 +947,12 @@
 	if (obj == NULL)
 		return (-ENOMEM);
 
-	handle = 0;
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	if (ret != 0) {
 		drm_gem_object_release(&obj->base);
 		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
 		free(obj, DRM_I915_GEM);
-		return (-ret);
+		return (ret);
 	}
 
 	/* drop reference from allocate - handle holds it now */
@@ -969,214 +990,798 @@
 	return (i915_gem_create(file, dev, args->size, &args->handle));
 }
 
+#define __user
+#define __force
+#define __iomem
+#define	to_user_ptr(x) ((void *)(uintptr_t)(x))
+#define	offset_in_page(x) ((x) & PAGE_MASK)
+#define	page_to_phys(x) VM_PAGE_TO_PHYS(x)
+static inline int
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
+{
+	return (copyout_nofault(from, to, n) != 0 ? n : 0);
+}
+static inline unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+    unsigned long n)
+{
+
+	/*
+	 * XXXKIB.  Equivalent Linux function is implemented using
+	 * MOVNTI for aligned moves.  For unaligned head and tail,
+	 * normal move is performed.  As such, it is not incorrect, if
+	 * only somewhat slower, to use normal copyin.  All uses
+	 * except shmem_pwrite_fast() have the destination mapped WC.
+	 */
+	return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
+}
+static inline int
+fault_in_multipages_readable(const char __user *uaddr, int size)
+{
+	char c;
+	int ret = 0;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return ret;
+
+	while (uaddr <= end) {
+		ret = -copyin(uaddr, &c, 1);
+		if (ret != 0)
+			return -EFAULT;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & ~PAGE_MASK) ==
+			((unsigned long)end & ~PAGE_MASK)) {
+		ret = -copyin(end, &c, 1);
+	}
+
+	return ret;
+}
+
+static inline int
+fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+	int ret = 0;
+	char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return ret;
+
+	/*
+	 * Writing zeroes into userspace here is OK, because we know that if
+	 * the zero gets there, we'll be overwriting it.
+	 */
+	while (uaddr <= end) {
+		ret = subyte(uaddr, 0);
+		if (ret != 0)
+			return -EFAULT;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & ~PAGE_MASK) ==
+			((unsigned long)end & ~PAGE_MASK))
+		ret = subyte(end, 0);
+
+	return ret;
+}
+
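/*
 * Illustrative usage note (not part of this change): both helpers
 * touch one byte per page so that the later copy loops, which run
 * under locks, are unlikely to fault.  i915_gem_pwrite_ioctl() below
 * uses the readable variant this way:
 *
 *	if (fault_in_multipages_readable(user_ptr, len) != 0)
 *		return (-EFAULT);
 */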
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+			const char *gpu_vaddr, int gpu_offset,
+			int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = roundup2(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_to_user(cpu_vaddr + cpu_offset,
+				     gpu_vaddr + swizzled_gpu_offset,
+				     this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+			  const char __user *cpu_vaddr,
+			  int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = roundup2(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+				       cpu_vaddr + cpu_offset,
+				       this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
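/*
 * Illustrative sketch only (not part of this change): the XOR in both
 * helpers above encodes the bit-17 swizzle, which swaps the two
 * 64-byte halves of every 128-byte block.  Chunking at 64-byte
 * cacheline boundaries (the roundup2 above) keeps each copy inside
 * one half.
 */
static inline int
bit17_swizzled_offset(int gpu_offset)
{
	return (gpu_offset ^ 64);
}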
 static int
-i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
-    uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw,
-    struct drm_file *file)
+i915_gem_phys_pwrite(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
 {
-	vm_object_t vm_obj;
-	vm_page_t m;
-	struct sf_buf *sf;
-	vm_offset_t mkva;
-	vm_pindex_t obj_pi;
-	int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
+	void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
 
-	if (obj->gtt_offset != 0 && rw == UIO_READ)
-		do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-	else
-		do_bit17_swizzling = 0;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
 
-	obj->dirty = 1;
-	vm_obj = obj->base.vm_obj;
-	ret = 0;
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		DRM_UNLOCK(dev);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		DRM_LOCK(dev);
+		if (unwritten)
+			return -EFAULT;
+	}
 
-	VM_OBJECT_LOCK(vm_obj);
-	vm_object_pip_add(vm_obj, 1);
-	while (size > 0) {
-		obj_pi = OFF_TO_IDX(offset);
-		obj_po = offset & PAGE_MASK;
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
 
-		m = i915_gem_wire_page(vm_obj, obj_pi);
-		VM_OBJECT_UNLOCK(vm_obj);
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
+static int
+shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	struct sf_buf *sf;
+	int ret;
 
-		sched_pin();
-		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-		mkva = sf_buf_kva(sf);
-		length = min(size, PAGE_SIZE - obj_po);
-		while (length > 0) {
-			if (do_bit17_swizzling &&
-			    (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
-				cnt = roundup2(obj_po + 1, 64);
-				cnt = min(cnt - obj_po, length);
-				swizzled_po = obj_po ^ 64;
-			} else {
-				cnt = length;
-				swizzled_po = obj_po;
-			}
-			if (rw == UIO_READ)
-				ret = -copyout_nofault(
-				    (char *)mkva + swizzled_po,
-				    (void *)(uintptr_t)data_ptr, cnt);
-			else
-				ret = -copyin_nofault(
-				    (void *)(uintptr_t)data_ptr,
-				    (char *)mkva + swizzled_po, cnt);
-			if (ret != 0)
-				break;
-			data_ptr += cnt;
-			size -= cnt;
-			length -= cnt;
-			offset += cnt;
-			obj_po += cnt;
-		}
-		sf_buf_free(sf);
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	sched_pin();
+	sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
+	if (sf == NULL) {
 		sched_unpin();
-		VM_OBJECT_LOCK(vm_obj);
-		if (rw == UIO_WRITE)
-			vm_page_dirty(m);
-		vm_page_reference(m);
-		vm_page_lock(m);
-		vm_page_unwire(m, 1);
-		vm_page_unlock(m);
-		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+		return (-EFAULT);
+	}
+	vaddr = (char *)sf_buf_kva(sf);
+	if (needs_clflush)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_to_user_inatomic(user_data,
+				      vaddr + shmem_page_offset,
+				      page_length);
+	sf_buf_free(sf);
+	sched_unpin();
 
-		if (ret != 0)
-			break;
+	return ret ? -EFAULT : 0;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (unlikely(swizzled)) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = rounddown2(start, 128);
+		end = roundup2(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
 	}
-	vm_object_pip_wakeup(vm_obj);
-	VM_OBJECT_UNLOCK(vm_obj);
 
-	return (ret);
 }
 
+/* Only difference from the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
 static int
-i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
-    uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
+shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	vm_offset_t mkva;
-	vm_pindex_t obj_pi;
-	int obj_po, ret;
+	char *vaddr;
+	struct sf_buf *sf;
+	int ret;
 
-	obj_pi = OFF_TO_IDX(offset);
-	obj_po = offset & PAGE_MASK;
+	sf = sf_buf_alloc(page, 0);
+	vaddr = (char *)sf_buf_kva(sf);
+	if (needs_clflush)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
 
-	mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
-	    IDX_TO_OFF(obj_pi), size, PAT_WRITE_COMBINING);
-	ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva +
-	    obj_po, size);
-	pmap_unmapdev(mkva, size);
-	return (ret);
+	if (page_do_bit17_swizzling)
+		ret = __copy_to_user_swizzled(user_data,
+					      vaddr, shmem_page_offset,
+					      page_length);
+	else
+		ret = __copy_to_user(user_data,
+				     vaddr + shmem_page_offset,
+				     page_length);
+	sf_buf_free(sf);
+
+	return ret ? -EFAULT : 0;
 }
 
 static int
-i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
-    uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file)
+i915_gem_shmem_pread(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pread *args,
+		     struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj;
-	vm_page_t *ma;
-	vm_offset_t start, end;
-	int npages, ret;
+	char __user *user_data;
+	ssize_t remain, sremain;
+	off_t offset, soffset;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int prefaulted = 0;
+	int needs_clflush = 0;
 
-	if (size == 0)
-		return (0);
-	start = trunc_page(data_ptr);
-	end = round_page(data_ptr + size);
-	npages = howmany(end - start, PAGE_SIZE);
-	ma = malloc(npages * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK |
-	    M_ZERO);
-	npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
-	    (vm_offset_t)data_ptr, size,
-	    (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages);
-	if (npages == -1) {
-		ret = -EFAULT;
-		goto free_ma;
+	user_data = to_user_ptr(args->data_ptr);
+	sremain = remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		if (ret)
+			return ret;
 	}
 
+	soffset = offset = args->offset;
+	ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	VM_OBJECT_WLOCK(obj->base.vm_obj);
+	for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
+	    OFF_TO_IDX(offset));; page = vm_page_next(page)) {
+		VM_OBJECT_WUNLOCK(obj->base.vm_obj);
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+		if (ret == 0)
+			goto next_page;
+
+		DRM_UNLOCK(dev);
+
+		if (likely(!i915_prefault_disable) && !prefaulted) {
+			ret = fault_in_multipages_writeable(user_data, remain);
+			/* Userspace is tricking us, but we've already clobbered
+			 * its pages with the prefault and promised to write the
+			 * data up to the first fault. Hence ignore any errors
+			 * and just continue. */
+			(void)ret;
+			prefaulted = 1;
+		}
+
+		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+
+		DRM_LOCK(dev);
+
+next_page:
+		vm_page_reference(page);
+
+		if (ret)
+			goto out;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+		VM_OBJECT_WLOCK(obj->base.vm_obj);
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_put_pages_range(obj, soffset, soffset + sremain);
+
+	return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE))
+		return -EFAULT;
+
 	ret = i915_mutex_lock_interruptible(dev);
-	if (ret != 0)
-		goto unlocked;
+	if (ret)
+		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	if (offset > obj->base.size || size > obj->base.size - offset) {
+
+	/* Bounds check source.  */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (rw == UIO_READ) {
-		CTR3(KTR_DRM, "object_pread %p %jx %jx", obj, offset, size);
-		ret = i915_gem_object_set_cpu_read_domain_range(obj,
-		    offset, size);
-		if (ret != 0)
-			goto out;
-		ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
-		    UIO_READ, file);
-	} else {
-		if (obj->phys_obj) {
-			CTR3(KTR_DRM, "object_phys_write %p %jx %jx", obj,
-			    offset, size);
-			ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset,
-			    size, file);
-		} else if (obj->gtt_space &&
-		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-			CTR3(KTR_DRM, "object_gtt_write %p %jx %jx", obj,
-			    offset, size);
-			ret = i915_gem_object_pin(obj, 0, true);
-			if (ret != 0)
-				goto out;
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret != 0)
-				goto out_unpin;
-			ret = i915_gem_object_put_fence(obj);
-			if (ret != 0)
-				goto out_unpin;
-			ret = i915_gem_gtt_write(dev, obj, data_ptr, size,
-			    offset, file);
-out_unpin:
-			i915_gem_object_unpin(obj);
-		} else {
-			CTR3(KTR_DRM, "object_pwrite %p %jx %jx", obj,
-			    offset, size);
-			ret = i915_gem_object_set_to_cpu_domain(obj, true);
-			if (ret != 0)
-				goto out;
-			ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
-			    UIO_WRITE, file);
-		}
+#if 1
+	KIB_NOTYET();
+#else
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
 	}
+#endif
+
+	CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
+
+	ret = i915_gem_shmem_pread(dev, obj, args, file);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	DRM_UNLOCK(dev);
-unlocked:
-	vm_page_unhold_pages(ma, npages);
-free_ma:
-	free(ma, DRM_I915_GEM);
-	return (ret);
+	return ret;
 }
 
-int
-i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+/* This is the fast write path which cannot handle
+ * page faults in the source data
+ */
+
+static inline int
+fast_user_write(struct drm_device *dev,
+		off_t page_base, int page_offset,
+		char __user *user_data,
+		int length)
 {
-	struct drm_i915_gem_pread *args;
+	void __iomem *vaddr_atomic;
+	void *vaddr;
+	unsigned long unwritten;
 
-	args = data;
-	return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
-	    args->offset, UIO_READ, file));
+	vaddr_atomic = pmap_mapdev_attr(dev->agp->base + page_base,
+	    length, PAT_WRITE_COMBINING);
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (char *)vaddr_atomic + page_offset;
+	unwritten = __copy_from_user_inatomic_nocache(vaddr,
+						      user_data, length);
+	pmap_unmapdev((vm_offset_t)vaddr_atomic, length);
+	return unwritten;
 }
 
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
+static int
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file)
+{
+	ssize_t remain;
+	off_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length, ret;
+
+	ret = i915_gem_object_pin(obj, 0, true);
+	/* XXXKIB ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); */
+	if (ret != 0)
+		goto out;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		goto out_unpin;
+
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		goto out_unpin;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	offset = obj->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = offset & ~PAGE_MASK;
+		page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (fast_user_write(dev, page_base,
+				    page_offset, user_data, page_length)) {
+			ret = -EFAULT;
+			goto out_unpin;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out_unpin:
+	i915_gem_object_unpin(obj);
+out:
+	return ret;
+}
+
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
+static int
+shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	struct sf_buf *sf;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	sched_pin();
+	sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
+	if (sf == NULL) {
+		sched_unpin();
+		return (-EFAULT);
+	}
+	vaddr = (char *)sf_buf_kva(sf);
+	if (needs_clflush_before)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+						user_data,
+						page_length);
+	if (needs_clflush_after)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	sf_buf_free(sf);
+	sched_unpin();
+
+	return ret ? -EFAULT : 0;
+}
+
+/* Only difference from the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	struct sf_buf *sf;
+	int ret;
+
+	sf = sf_buf_alloc(page, 0);
+	vaddr = (char *)sf_buf_kva(sf);
+	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	if (page_do_bit17_swizzling)
+		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+						user_data,
+						page_length);
+	else
+		ret = __copy_from_user(vaddr + shmem_page_offset,
+				       user_data,
+				       page_length);
+	if (needs_clflush_after)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	sf_buf_free(sf);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev,
+		      struct drm_i915_gem_object *obj,
+		      struct drm_i915_gem_pwrite *args,
+		      struct drm_file *file)
+{
+	ssize_t remain, sremain;
+	off_t offset, soffset;
+	char __user *user_data;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int hit_slowpath = 0;
+	int needs_clflush_after = 0;
+	int needs_clflush_before = 0;
+
+	user_data = to_user_ptr(args->data_ptr);
+	sremain = remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the cpu write domain, set ourselves into the gtt
+		 * write domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will use the data
+		 * right away and we therefore have to clflush anyway. */
+		needs_clflush_after = cpu_write_needs_clflush(obj);
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret)
+			return ret;
+	}
+	/* The same trick applies to invalidating partially written
+	 * cachelines read before writing. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		needs_clflush_before =
+			!cpu_cache_is_coherent(dev, obj->cache_level);
+
+	soffset = offset = args->offset;
+	ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	obj->dirty = 1;
+
+	VM_OBJECT_WLOCK(obj->base.vm_obj);
+	for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
+	    OFF_TO_IDX(offset));; page = vm_page_next(page)) {
+		int partial_cacheline_write;
+
+		VM_OBJECT_WUNLOCK(obj->base.vm_obj);
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
+		/* If we don't overwrite a cacheline completely, we need to be
+		 * careful to have up-to-date data by first clflushing.  Don't
+		 * overcomplicate things and flush the entire page. */
+		partial_cacheline_write = needs_clflush_before &&
+			((shmem_page_offset | page_length)
+				& (cpu_clflush_line_size - 1));
+
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+		if (ret == 0)
+			goto next_page;
+
+		hit_slowpath = 1;
+		DRM_UNLOCK(dev);
+		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+
+		DRM_LOCK(dev);
+
+next_page:
+		vm_page_dirty(page);
+		vm_page_reference(page);
+
+		if (ret)
+			goto out;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+		VM_OBJECT_WLOCK(obj->base.vm_obj);
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_put_pages_range(obj, soffset, soffset + sremain);
+
+	if (hit_slowpath) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+			i915_gem_clflush_object(obj);
+			i915_gem_chipset_flush(dev);
+		}
+	}
+
+	if (needs_clflush_after)
+		i915_gem_chipset_flush(dev);
+
+	return ret;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
 int
-i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
 {
-	struct drm_i915_gem_pwrite *args;
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
 
-	args = data;
-	return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
-	    args->offset, UIO_WRITE, file));
+	if (args->size == 0)
+		return 0;
+
+	if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
+		return -EFAULT;
+
+	if (likely(!i915_prefault_disable)) {
+		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+						   args->size);
+		if (ret)
+			return -EFAULT;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Bounds check destination. */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+#if 1
+	KIB_NOTYET();
+#else
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
+
+	CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
+
+	ret = -EFAULT;
+	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+	 * it would end up going through the fenced access, and we'll get
+	 * different detiling behavior between reading and writing.
+	 * pread/pwrite currently are reading and writing from the CPU
+	 * perspective, requiring manual detiling by the client.
+	 */
+	if (obj->phys_obj) {
+		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+		goto out;
+	}
+
+	if (obj->tiling_mode == I915_TILING_NONE &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+	    cpu_write_needs_clflush(obj)) {
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+		/* Note that the gtt paths might fail with non-page-backed user
+		 * pointers (e.g. gtt mappings when moving data between
+		 * textures). Fallback to the shmem path in that case. */
+	}
+
+	if (ret == -EFAULT || ret == -ENOSPC)
+		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	DRM_UNLOCK(dev);
+	return ret;
 }
+#undef __user
+#undef __force
+#undef __iomem
+#undef to_user_ptr
+#undef offset_in_page
+#undef page_to_phys
 
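/*
 * Illustrative note (not part of this change): the dispatch order in
 * i915_gem_pwrite_ioctl() above is (1) phys objects go through
 * i915_gem_phys_pwrite(); (2) untiled objects outside the CPU write
 * domain that would need a clflush try the write-combined GTT fast
 * path; (3) -EFAULT or -ENOSPC from that path falls back to the shmem
 * path, which handles tiling and page faults.
 */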
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1188,9 +1793,6 @@
 	uint32_t write_domain;
 	int ret;
 
-	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-		return (-ENODEV);
-
 	args = data;
 	read_domains = args->read_domains;
 	write_domain = args->write_domain;
@@ -1232,9 +1834,7 @@
 	int ret;
 
 	args = data;
-	ret = 0;
-	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-		return (ENODEV);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret != 0)
 		return (ret);
@@ -1265,9 +1865,6 @@
 
 	args = data;
 
-	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-		return (-ENODEV);
-
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return (-ENOENT);
@@ -1280,7 +1877,7 @@
 	PROC_LOCK(p);
 	if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
 		PROC_UNLOCK(p);
-		error = ENOMEM;
+		error = -ENOMEM;
 		goto out;
 	}
 	PROC_UNLOCK(p);
@@ -1287,10 +1884,9 @@
 
 	addr = 0;
 	vm_object_reference(obj->vm_obj);
-	DRM_UNLOCK(dev);
-	rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size,
-	    VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE,
-	    VM_PROT_READ | VM_PROT_WRITE, MAP_SHARED);
+	rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
+	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
+	    VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
 	if (rv != KERN_SUCCESS) {
 		vm_object_deallocate(obj->vm_obj);
 		error = -vm_mmap_to_errno(rv);
@@ -1297,7 +1893,6 @@
 	} else {
 		args->addr_ptr = (uint64_t)addr;
 	}
-	DRM_LOCK(dev);
 out:
 	drm_gem_object_unreference(obj);
 	return (error);
@@ -1356,9 +1951,8 @@
 		*mres = NULL;
 	} else
 		oldm = NULL;
+	VM_OBJECT_WUNLOCK(vm_obj);
 retry:
-	VM_OBJECT_UNLOCK(vm_obj);
-unlocked_vmobj:
 	cause = ret = 0;
 	m = NULL;
 
@@ -1376,17 +1970,19 @@
 	 * faulted on the same GTT address and instantiated the
 	 * mapping for the page.  Recheck.
 	 */
-	VM_OBJECT_LOCK(vm_obj);
+	VM_OBJECT_WLOCK(vm_obj);
 	m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
 	if (m != NULL) {
-		if ((m->flags & VPO_BUSY) != 0) {
+		if (vm_page_busied(m)) {
 			DRM_UNLOCK(dev);
-			vm_page_sleep(m, "915pee");
+			vm_page_lock(m);
+			VM_OBJECT_WUNLOCK(vm_obj);
+			vm_page_busy_sleep(m, "915pee", false);
 			goto retry;
 		}
 		goto have_page;
 	} else
-		VM_OBJECT_UNLOCK(vm_obj);
+		VM_OBJECT_WUNLOCK(vm_obj);
 
 	/* Now bind it into the GTT if needed */
 	if (!obj->map_and_fenceable) {
@@ -1410,10 +2006,10 @@
 		}
 	}
 
-	if (obj->tiling_mode == I915_TILING_NONE)
-		ret = i915_gem_object_put_fence(obj);
-	else
-		ret = i915_gem_object_get_fence(obj, NULL);
+	if (!obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+	ret = i915_gem_object_get_fence(obj);
 	if (ret != 0) {
 		cause = 50;
 		goto unlock;
@@ -1423,10 +2019,13 @@
 		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	obj->fault_mappable = true;
-	VM_OBJECT_LOCK(vm_obj);
-	m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
-	    offset);
+	VM_OBJECT_WLOCK(vm_obj);
+	m = PHYS_TO_VM_PAGE(dev->agp->base + obj->gtt_offset + offset);
+	KASSERT((m->flags & PG_FICTITIOUS) != 0,
+	    ("physical address %#jx not fictitious",
+	    (uintmax_t)(dev->agp->base + obj->gtt_offset + offset)));
 	if (m == NULL) {
+		VM_OBJECT_WUNLOCK(vm_obj);
 		cause = 60;
 		ret = -EFAULT;
 		goto unlock;
@@ -1435,16 +2034,23 @@
 	    ("not fictitious %p", m));
 	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
 
-	if ((m->flags & VPO_BUSY) != 0) {
+	if (vm_page_busied(m)) {
 		DRM_UNLOCK(dev);
-		vm_page_sleep_if_busy(m, false, "915pbs");
+		vm_page_lock(m);
+		VM_OBJECT_WUNLOCK(vm_obj);
+		vm_page_busy_sleep(m, "915pbs", false);
 		goto retry;
 	}
+	if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
+		DRM_UNLOCK(dev);
+		VM_OBJECT_WUNLOCK(vm_obj);
+		VM_WAIT;
+		goto retry;
+	}
 	m->valid = VM_PAGE_BITS_ALL;
-	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
 have_page:
 	*mres = m;
-	vm_page_busy(m);
+	vm_page_xbusy(m);
 
 	CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
 	    m->phys_addr);
@@ -1465,9 +2071,9 @@
 	    -ret, cause);
 	if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
 		kern_yield(PRI_USER);
-		goto unlocked_vmobj;
+		goto retry;
 	}
-	VM_OBJECT_LOCK(vm_obj);
+	VM_OBJECT_WLOCK(vm_obj);
 	vm_object_pip_wakeup(vm_obj);
 	return (VM_PAGER_ERROR);
 }
@@ -1502,9 +2108,6 @@
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return (-ENODEV);
-
 	dev_priv = dev->dev_private;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1664,6 +2267,7 @@
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
@@ -1697,6 +2301,10 @@
 		obj->dirty = 1;
 	}
 
+	/* And bump the LRU for this access */
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
 	CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
 	    old_read_domains, old_write_domain);
 	return (0);
@@ -1737,7 +2345,8 @@
 				return (ret);
 		}
 
-		i915_gem_gtt_rebind_object(obj, cache_level);
+		if (obj->has_global_gtt_mapping)
+			i915_gem_gtt_bind_object(obj, cache_level);
 		if (obj->has_aliasing_ppgtt_mapping)
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 			    obj, cache_level);
@@ -1771,6 +2380,22 @@
 	return (0);
 }
 
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+	/* There are 3 sources that pin objects:
+	 *   1. The display engine (scanouts, sprites, cursors);
+	 *   2. Reservations for execbuffer;
+	 *   3. The user.
+	 *
+	 * We can ignore reservations as we hold the struct_mutex and
+	 * are only called outside of the reservation path.  The user
+	 * can only increment pin_count once, and so if after
+	 * subtracting the potential reference by the user, any pin_count
+	 * remains, it must be due to another use by the display engine.
+	 */
+	return obj->pin_count - !!obj->user_pin_count;
+}
+
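/*
 * Worked example for the accounting above (illustrative, not part of
 * this change): with pin_count == 2 and at least one user pin,
 * 2 - 1 = 1, so a display-engine pin remains; with pin_count == 1 and
 * one user pin, 1 - 1 = 0, so the object is not pinned for display.
 */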
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
     u32 alignment, struct intel_ring_buffer *pipelined)
@@ -1783,18 +2408,19 @@
 		return (ret);
 
 	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret == -ERESTART || ret == -EINTR)
+		ret = i915_gem_object_sync(obj, pipelined);
+		if (ret)
 			return (ret);
 	}
 
+	obj->pin_display = true;
 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 	if (ret != 0)
-		return (ret);
+		goto err_unpin_display;
 
 	ret = i915_gem_object_pin(obj, alignment, true);
 	if (ret != 0)
-		return (ret);
+		goto err_unpin_display;
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
@@ -1808,8 +2434,19 @@
 	CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
 	    obj, old_read_domains, obj->base.write_domain);
 	return (0);
+
+err_unpin_display:
+	obj->pin_display = is_pin_display(obj);
+	return ret;
 }
 
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin(obj);
+	obj->pin_display = is_pin_display(obj);
+}
+
 int
 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 {
@@ -1833,7 +2470,7 @@
 	return (0);
 }
 
-static int
+int
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
@@ -1846,12 +2483,13 @@
 	if (ret != 0)
 		return (ret);
 
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret != 0)
-		return (ret);
+	if (write || obj->pending_gpu_write) {
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret != 0)
+			return (ret);
+	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
-	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
@@ -1874,74 +2512,6 @@
 	return (0);
 }
 
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
-	int i;
-
-	if (obj->page_cpu_valid == NULL)
-		return;
-
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) {
-		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
-			if (obj->page_cpu_valid[i] != 0)
-				continue;
-			drm_clflush_pages(obj->pages + i, 1);
-		}
-	}
-
-	free(obj->page_cpu_valid, DRM_I915_GEM);
-	obj->page_cpu_valid = NULL;
-}
-
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-    uint64_t offset, uint64_t size)
-{
-	uint32_t old_read_domains;
-	int i, ret;
-
-	if (offset == 0 && size == obj->base.size)
-		return (i915_gem_object_set_to_cpu_domain(obj, 0));
-
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
-		return (ret);
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret != 0)
-		return (ret);
-
-	i915_gem_object_flush_gtt_write_domain(obj);
-
-	if (obj->page_cpu_valid == NULL &&
-	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
-		return (0);
-
-	if (obj->page_cpu_valid == NULL) {
-		obj->page_cpu_valid = malloc(obj->base.size / PAGE_SIZE,
-		    DRM_I915_GEM, M_WAITOK | M_ZERO);
-	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
-	     i++) {
-		if (obj->page_cpu_valid[i])
-			continue;
-		drm_clflush_pages(obj->pages + i, 1);
-		obj->page_cpu_valid[i] = 1;
-	}
-
-	KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
-	    ("In gpu write domain"));
-
-	old_read_domains = obj->base.read_domains;
-	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
-	CTR3(KTR_DRM, "object_change_domain set_cpu_read %p %x %x", obj,
-	    old_read_domains, obj->base.write_domain);
-	return (0);
-}
-
 static uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
@@ -2068,11 +2638,11 @@
 	if (free_space != NULL) {
 		if (map_and_fenceable)
 			obj->gtt_space = drm_mm_get_block_range_generic(
-			    free_space, size, alignment, 0,
+			    free_space, size, alignment, 0, 0,
 			    dev_priv->mm.gtt_mappable_end, 1);
 		else
 			obj->gtt_space = drm_mm_get_block_generic(free_space,
-			    size, alignment, 1);
+			    size, alignment, 0, 1);
 	}
 	if (obj->gtt_space == NULL) {
 		ret = i915_gem_evict_something(dev, size, alignment,
@@ -2087,14 +2657,12 @@
 		obj->gtt_space = NULL;
 		/*
 		 * i915_gem_object_get_pages_gtt() cannot return
-		 * ENOMEM, since we use vm_page_grab(VM_ALLOC_RETRY)
-		 * (which does not support operation without a flag
-		 * anyway).
+		 * ENOMEM, since we use vm_page_grab().
 		 */
 		return (ret);
 	}
 
-	ret = i915_gem_gtt_bind_object(obj);
+	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret != 0) {
 		i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
@@ -2104,6 +2672,9 @@
 		goto search_free;
 	}
 
+	if (!dev_priv->mm.aliasing_ppgtt)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
 	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
@@ -2127,9 +2698,49 @@
 	return (0);
 }
 
-static void
-i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+		     struct intel_ring_buffer *to)
 {
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+		return i915_gem_object_wait_rendering(obj);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = malloc(sizeof(*request), DRM_I915_GEM,
+		    M_WAITOK | M_ZERO);
+		ret = i915_add_request(from, NULL, request);
+		if (ret) {
+			free(request, DRM_I915_GEM);
+			return ret;
+		}
+		seqno = request->seqno;
+	}
+
+	ret = to->sync_to(to, from, seqno);
+	if (!ret)
+		from->sync_seqno[idx] = seqno;
+
+	return ret;
+}
+
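i915_gem_object_sync() only emits a semaphore wait when the object's seqno is newer than what the source ring has already been synchronised against; the per-peer sync_seqno[] cache makes repeated syncs against older work free. A small model of that caching, assuming two hypothetical rings (the index formula is an illustrative stand-in for intel_ring_sync_index()):

/*
 * Toy model of inter-ring sync with a per-peer seqno cache.
 * Ring ids and the index computation are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 2

struct ring {
	int id;
	uint32_t sync_seqno[NUM_RINGS];	/* last seqno waited on, per peer */
};

static int
ring_sync_index(const struct ring *from, const struct ring *to)
{
	int idx = to->id - from->id - 1;	/* stable peer index */

	if (idx < 0)
		idx += NUM_RINGS;
	return (idx);
}

static int
object_sync(struct ring *from, struct ring *to, uint32_t obj_seqno)
{
	int idx = ring_sync_index(from, to);

	if (obj_seqno <= from->sync_seqno[idx])
		return (0);			/* already waited far enough */
	printf("ring%d waits on ring%d seqno %u\n",
	    to->id, from->id, (unsigned)obj_seqno);
	from->sync_seqno[idx] = obj_seqno;	/* cache only on success */
	return (1);
}

int
main(void)
{
	struct ring rcs = { .id = 0 }, bcs = { .id = 1 };

	object_sync(&rcs, &bcs, 10);	/* emits a wait */
	object_sync(&rcs, &bcs, 8);	/* no-op: 8 <= cached 10 */
	return (0);
}
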
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
 	u32 old_write_domain, old_read_domains;
 
 	/* Act as a barrier for all accesses through the GTT */
@@ -2161,13 +2772,11 @@
 	ret = 0;
 	if (obj->gtt_space == NULL)
 		return (0);
-	if (obj->pin_count != 0) {
-		DRM_ERROR("Attempting to unbind pinned buffer\n");
+	if (obj->pin_count != 0)
 		return (-EINVAL);
-	}
 
 	ret = i915_gem_object_finish_gpu(obj);
-	if (ret == -ERESTART || ret == -EINTR)
+	if (ret == -ERESTARTSYS || ret == -EINTR)
 		return (ret);
 
 	i915_gem_object_finish_gtt(obj);
@@ -2174,7 +2783,7 @@
 
 	if (ret == 0)
 		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret == -ERESTART || ret == -EINTR)
+	if (ret == -ERESTARTSYS || ret == -EINTR)
 		return (ret);
 	if (ret != 0) {
 		i915_gem_clflush_object(obj);
@@ -2183,14 +2792,17 @@
 	}
 
 	ret = i915_gem_object_put_fence(obj);
-	if (ret == -ERESTART)
+	if (ret)
 		return (ret);
 
-	i915_gem_gtt_unbind_object(obj);
+	if (obj->has_global_gtt_mapping)
+		i915_gem_gtt_unbind_object(obj);
 	if (obj->has_aliasing_ppgtt_mapping) {
 		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
+	i915_gem_gtt_finish_object(obj);
+
 	i915_gem_object_put_pages_gtt(obj);
 
 	list_del_init(&obj->gtt_list);
@@ -2208,7 +2820,71 @@
 	return (ret);
 }
 
+static void
+i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj,
+    vm_pindex_t si, vm_pindex_t ei)
+{
+	vm_object_t vm_obj;
+	vm_page_t m;
+	vm_pindex_t i;
+
+	vm_obj = obj->base.vm_obj;
+	VM_OBJECT_ASSERT_LOCKED(vm_obj);
+	for (i = si,  m = vm_page_lookup(vm_obj, i); i < ei;
+	    m = vm_page_next(m), i++) {
+		KASSERT(m->pindex == i, ("pindex %jx %jx",
+		    (uintmax_t)m->pindex, (uintmax_t)i));
+		vm_page_lock(m);
+		vm_page_unwire(m, 0);
+		if (m->wire_count == 0)
+			atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+		vm_page_unlock(m);
+	}
+}
+
+static void
+i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj,
+    off_t start, off_t end)
+{
+	vm_object_t vm_obj;
+
+	vm_obj = obj->base.vm_obj;
+	VM_OBJECT_WLOCK(vm_obj);
+	i915_gem_object_put_pages_range_locked(obj,
+	    OFF_TO_IDX(trunc_page(start)), OFF_TO_IDX(round_page(end)));
+	VM_OBJECT_WUNLOCK(vm_obj);
+}
+
 static int
+i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
+    off_t start, off_t end)
+{
+	vm_object_t vm_obj;
+	vm_page_t m;
+	vm_pindex_t si, ei, i;
+	bool need_swizzle, fresh;
+
+	need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0;
+	vm_obj = obj->base.vm_obj;
+	si = OFF_TO_IDX(trunc_page(start));
+	ei = OFF_TO_IDX(round_page(end));
+	VM_OBJECT_WLOCK(vm_obj);
+	for (i = si; i < ei; i++) {
+		m = i915_gem_wire_page(vm_obj, i, &fresh);
+		if (m == NULL)
+			goto failed;
+		if (need_swizzle && fresh)
+			i915_gem_object_do_bit_17_swizzle_page(obj, m);
+	}
+	VM_OBJECT_WUNLOCK(vm_obj);
+	return (0);
+failed:
+	i915_gem_object_put_pages_range_locked(obj, si, i);
+	VM_OBJECT_WUNLOCK(vm_obj);
+	return (-EIO);
+}
+
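The failed: path above is the classic acquire-with-rollback shape: pages si..i-1 were wired before index i failed, so exactly that prefix is released. Stripped of the VM details, the pattern looks like this (acquire/release and the failing index are placeholders):

/*
 * Sketch of the partial-failure rollback pattern: on the first
 * failure, release exactly the resources already acquired.
 */
#include <stdio.h>

static int
acquire(int i)
{
	return (i == 7 ? -1 : 0);	/* pretend index 7 fails */
}

static void
release(int i)
{
	printf("release %d\n", i);
}

static int
acquire_range(int si, int ei)
{
	int i;

	for (i = si; i < ei; i++)
		if (acquire(i) != 0)
			goto failed;
	return (0);
failed:
	while (--i >= si)		/* roll back i-1 .. si */
		release(i);
	return (-1);
}

int
main(void)
{
	printf("acquire_range: %d\n", acquire_range(0, 10));
	return (0);
}
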
+static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
     int flags)
 {
@@ -2215,36 +2891,30 @@
 	struct drm_device *dev;
 	vm_object_t vm_obj;
 	vm_page_t m;
-	int page_count, i, j;
+	vm_pindex_t i, page_count;
+	int res;
 
 	dev = obj->base.dev;
 	KASSERT(obj->pages == NULL, ("Obj already has pages"));
-	page_count = obj->base.size / PAGE_SIZE;
+	page_count = OFF_TO_IDX(obj->base.size);
 	obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
 	    M_WAITOK);
+	res = i915_gem_object_get_pages_range(obj, 0, obj->base.size);
+	if (res != 0) {
+		free(obj->pages, DRM_I915_GEM);
+		obj->pages = NULL;
+		return (res);
+	}
 	vm_obj = obj->base.vm_obj;
-	VM_OBJECT_LOCK(vm_obj);
-	for (i = 0; i < page_count; i++) {
-		if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
-			goto failed;
+	VM_OBJECT_WLOCK(vm_obj);
+	for (i = 0, m = vm_page_lookup(vm_obj, 0); i < page_count;
+	    i++, m = vm_page_next(m)) {
+		KASSERT(m->pindex == i, ("pindex %jx %jx",
+		    (uintmax_t)m->pindex, (uintmax_t)i));
+		obj->pages[i] = m;
 	}
-	VM_OBJECT_UNLOCK(vm_obj);
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj);
+	VM_OBJECT_WUNLOCK(vm_obj);
 	return (0);
-
-failed:
-	for (j = 0; j < i; j++) {
-		m = obj->pages[j];
-		vm_page_lock(m);
-		vm_page_unwire(m, 0);
-		vm_page_unlock(m);
-		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
-	}
-	VM_OBJECT_UNLOCK(vm_obj);
-	free(obj->pages, DRM_I915_GEM);
-	obj->pages = NULL;
-	return (-EIO);
 }
 
 #define	GEM_PARANOID_CHECK_GTT 0
@@ -2287,7 +2957,7 @@
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 	page_count = obj->base.size / PAGE_SIZE;
-	VM_OBJECT_LOCK(obj->base.vm_obj);
+	VM_OBJECT_WLOCK(obj->base.vm_obj);
 #if GEM_PARANOID_CHECK_GTT
 	i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
 #endif
@@ -2302,7 +2972,7 @@
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
-	VM_OBJECT_UNLOCK(obj->base.vm_obj);
+	VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 	obj->dirty = 0;
 	free(obj->pages, DRM_I915_GEM);
 	obj->pages = NULL;
@@ -2324,17 +2994,17 @@
 	if (devobj != NULL) {
 		page_count = OFF_TO_IDX(obj->base.size);
 
-		VM_OBJECT_LOCK(devobj);
+		VM_OBJECT_WLOCK(devobj);
 retry:
 		for (i = 0; i < page_count; i++) {
 			m = vm_page_lookup(devobj, i);
 			if (m == NULL)
 				continue;
-			if (vm_page_sleep_if_busy(m, true, "915unm"))
+			if (vm_page_sleep_if_busy(m, "915unm"))
 				goto retry;
 			cdev_pager_free_page(devobj, m);
 		}
-		VM_OBJECT_UNLOCK(devobj);
+		VM_OBJECT_WUNLOCK(devobj);
 		vm_object_deallocate(devobj);
 	}
 
@@ -2353,10 +3023,10 @@
 	    obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset,
 	    obj->active, obj->last_rendering_seqno);
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
-		    true);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret != 0)
 			return (ret);
+		i915_gem_retire_requests_ring(obj->ring);
 	}
 	return (0);
 }
@@ -2385,7 +3055,6 @@
 	obj->last_rendering_seqno = seqno;
 	if (obj->fenced_gpu_access) {
 		obj->last_fenced_seqno = seqno;
-		obj->last_fenced_ring = ring;
 
 		/* Bump MRU to take account of the delayed flush */
 		if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -2422,15 +3091,11 @@
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (obj->pin_count != 0)
-		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
-	else
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list"));
 	KASSERT(obj->active, ("Object not active"));
 	obj->ring = NULL;
-	obj->last_fenced_ring = NULL;
 
 	i915_gem_object_move_off_active(obj);
 	obj->fenced_gpu_access = false;
@@ -2452,9 +3117,10 @@
 	vm_object_t vm_obj;
 
 	vm_obj = obj->base.vm_obj;
-	VM_OBJECT_LOCK(vm_obj);
+	VM_OBJECT_WLOCK(vm_obj);
 	vm_object_page_remove(vm_obj, 0, 0, false);
-	VM_OBJECT_UNLOCK(vm_obj);
+	VM_OBJECT_WUNLOCK(vm_obj);
+	drm_gem_free_mmap_offset(&obj->base);
 	obj->madv = I915_MADV_PURGED_INTERNAL;
 }
 
@@ -2498,13 +3164,13 @@
 }
 
 static vm_page_t
-i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
+i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
 {
 	vm_page_t m;
 	int rv;
 
-	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(object, pindex, NULL, NULL)) {
 			rv = vm_pager_get_pages(object, &m, 1, 0);
@@ -2517,16 +3183,22 @@
 				vm_page_unlock(m);
 				return (NULL);
 			}
+			if (fresh != NULL)
+				*fresh = true;
 		} else {
 			pmap_zero_page(m);
 			m->valid = VM_PAGE_BITS_ALL;
 			m->dirty = 0;
+			if (fresh != NULL)
+				*fresh = false;
 		}
+	} else if (fresh != NULL) {
+		*fresh = false;
 	}
 	vm_page_lock(m);
 	vm_page_wire(m);
 	vm_page_unlock(m);
-	vm_page_wakeup(m);
+	vm_page_xunbusy(m);
 	atomic_add_long(&i915_gem_wired_pages_cnt, 1);
 	return (m);
 }
@@ -2552,7 +3224,7 @@
 }
 
 static int
-i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 
@@ -2566,41 +3238,41 @@
 			return ret;
 	}
 
-	return (i915_wait_request(ring, i915_gem_next_request_seqno(ring),
-	    do_retire));
+	return (i915_wait_request(ring, i915_gem_next_request_seqno(ring)));
 }
 
 int
-i915_gpu_idle(struct drm_device *dev, bool do_retire)
+i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->rings[i], do_retire);
+	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
 		if (ret)
 			return ret;
+
+		ret = i915_ring_idle(ring);
+		if (ret)
+			return ret;
+
+		/* Is the device fubar? */
+		if (!list_empty(&ring->gpu_write_list))
+			return -EBUSY;
 	}
 
 	return 0;
 }
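
for_each_ring() replaces the old index loop so that only initialised rings are visited. The shape of such a macro, sketched for userspace (the real one lives in the i915 headers and tests intel_ring_initialized(); everything here is illustrative):

/* Visit only initialised rings; the comma operator keeps the
 * assignment and the test inside a single if condition. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

struct ring { bool initialized; const char *name; };
struct priv { struct ring rings[NUM_RINGS]; };

#define for_each_ring(ring, p, i)				\
	for ((i) = 0; (i) < NUM_RINGS; (i)++)			\
		if (((ring) = &(p)->rings[(i)]), (ring)->initialized)

int
main(void)
{
	struct priv p = { .rings = {
		{ true, "render" }, { false, "bsd" }, { true, "blt" } } };
	struct ring *ring;
	int i;

	for_each_ring(ring, &p, i)
		printf("%s\n", ring->name);	/* prints render, blt */
	return (0);
}
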
 
-int
-i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_i915_gem_request *request;
-	uint32_t ier;
-	int flags, ret;
-	bool recovery_complete;
+	DRM_LOCK_ASSERT(dev_priv->dev);
 
-	KASSERT(seqno != 0, ("Zero seqno"));
-
-	dev_priv = ring->dev->dev_private;
-	ret = 0;
-
 	if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
+		bool recovery_complete;
 		/* Give the error handler a chance to run. */
 		mtx_lock(&dev_priv->error_completion_lock);
 		recovery_complete = dev_priv->error_completion > 0;
@@ -2608,11 +3280,25 @@
 		return (recovery_complete ? -EIO : -EAGAIN);
 	}
 
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret = 0;
+
+	DRM_LOCK_ASSERT(ring->dev);
+
 	if (seqno == ring->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
 		request = malloc(sizeof(*request), DRM_I915_GEM,
 		    M_WAITOK | M_ZERO);
-		if (request == NULL)
-			return (-ENOMEM);
 
 		ret = i915_add_request(ring, NULL, request);
 		if (ret != 0) {
@@ -2620,59 +3306,67 @@
 			return (ret);
 		}
 
-		seqno = request->seqno;
+		MPASS(seqno == request->seqno);
 	}
+	return ret;
+}
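
The "outstanding lazy request" optimisation defers allocating a request structure until somebody actually waits: work retires silently when nobody looks, and i915_gem_check_olr() materialises the request just in time. A toy version of that deferral (names are illustrative, not the driver's):

/* Sketch: hand out seqnos eagerly, emit the request lazily. */
#include <stdio.h>
#include <stdint.h>

static uint32_t next_seqno = 1;
static uint32_t outstanding_lazy_request;	/* seqno with no request yet */

static uint32_t
submit_work(void)
{
	outstanding_lazy_request = next_seqno;	/* defer the bookkeeping */
	return (next_seqno++);
}

static void
check_olr(uint32_t seqno)
{
	if (seqno == outstanding_lazy_request) {
		printf("emitting request for seqno %u\n", (unsigned)seqno);
		outstanding_lazy_request = 0;	/* the request now exists */
	}
}

int
main(void)
{
	uint32_t s = submit_work();

	check_olr(s);		/* a waiter arrives: request emitted now */
	return (0);
}
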
 
-	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		if (HAS_PCH_SPLIT(ring->dev))
-			ier = I915_READ(DEIER) | I915_READ(GTIER);
-		else
-			ier = I915_READ(IER);
-		if (!ier) {
-			DRM_ERROR("something (likely vbetool) disabled "
-				  "interrupts, re-enabling\n");
-			ring->dev->driver->irq_preinstall(ring->dev);
-			ring->dev->driver->irq_postinstall(ring->dev);
-		}
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			bool interruptible)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	int ret = 0, flags;
 
-		CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
+	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+		return 0;
 
-		ring->waiting_seqno = seqno;
-		mtx_lock(&ring->irq_lock);
-		if (ring->irq_get(ring)) {
-			flags = dev_priv->mm.interruptible ? PCATCH : 0;
-			while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
-			    && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
-			    ret == 0) {
-				ret = -msleep(ring, &ring->irq_lock, flags,
-				    "915gwr", 0);
-			}
-			ring->irq_put(ring);
-			mtx_unlock(&ring->irq_lock);
-		} else {
-			mtx_unlock(&ring->irq_lock);
-			if (_intel_wait_for(ring->dev,
-			    i915_seqno_passed(ring->get_seqno(ring), seqno) ||
-			    atomic_load_acq_int(&dev_priv->mm.wedged), 3000,
-			    0, "i915wrq") != 0)
-				ret = -EBUSY;
-		}
-		ring->waiting_seqno = 0;
+	CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
 
-		CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno,
-		    ret);
+	mtx_lock(&dev_priv->irq_lock);
+	if (!ring->irq_get(ring)) {
+		mtx_unlock(&dev_priv->irq_lock);
+		return (-ENODEV);
 	}
+
+	flags = interruptible ? PCATCH : 0;
+	while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
+	    && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
+	    ret == 0) {
+		ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
+		if (ret == -ERESTART)
+			ret = -ERESTARTSYS;
+	}
+	ring->irq_put(ring);
+	mtx_unlock(&dev_priv->irq_lock);
+
+	CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, ret);
+
+	return ret;
+}
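
__wait_seqno() loops on i915_seqno_passed(), which is defined elsewhere in the driver; the conventional form is a wraparound-safe signed comparison, worth spelling out since seqnos are 32-bit counters that do wrap:

/* Wraparound-safe "has seqno a passed b?" (assumes the usual
 * two's-complement cast behaviour). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
seqno_passed(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b)) >= 0;
}

int
main(void)
{
	assert(seqno_passed(5, 3));
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(7, 7));		/* equality counts as passed */
	assert(seqno_passed(2, 0xfffffffeU));	/* 2 is "after" the wrap */
	return (0);
}
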
+
+int
+i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv;
+	int ret;
+
+	KASSERT(seqno != 0, ("Zero seqno"));
+
+	dev_priv = ring->dev->dev_private;
+	ret = 0;
+
+	ret = i915_gem_check_wedge(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
 	if (atomic_load_acq_int(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
 
-	/* Directly dispatch request retiring.  While we have the work queue
-	 * to handle this, the waiter on a request often wants an associated
-	 * buffer to have made it to the inactive list, and we would need
-	 * a separate wait queue to handle that.
-	 */
-	if (ret == 0 && do_retire)
-		i915_gem_retire_requests_ring(ring);
-
 	return (ret);
 }
 
@@ -2833,20 +3527,18 @@
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-		struct drm_i915_gem_object *obj = reg->obj;
 
-		if (!obj)
-			continue;
+		i915_gem_write_fence(dev, i, NULL);
 
-		if (obj->tiling_mode)
-			i915_gem_release_mmap(obj);
+		if (reg->obj)
+			i915_gem_object_fence_lost(reg->obj);
 
-		reg->obj->fence_reg = I915_FENCE_REG_NONE;
-		reg->obj->fenced_gpu_access = false;
-		reg->obj->last_fenced_seqno = 0;
-		reg->obj->last_fenced_ring = NULL;
-		i915_gem_clear_fence_reg(dev, reg);
+		reg->pin_count = 0;
+		reg->obj = NULL;
+		INIT_LIST_HEAD(&reg->lru_list);
 	}
+
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 
 void
@@ -2854,10 +3546,11 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_lists(dev_priv, ring);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
@@ -2899,7 +3592,7 @@
 	seqno = ring->get_seqno(ring);
 	CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
 
-	for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
 			ring->sync_seqno[i] = 0;
 
@@ -2943,9 +3636,10 @@
 
 	if (ring->trace_irq_seqno &&
 	    i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
-		mtx_lock(&ring->irq_lock);
+		struct drm_i915_private *dev_priv = ring->dev->dev_private;
+		mtx_lock(&dev_priv->irq_lock);
 		ring->irq_put(ring);
-		mtx_unlock(&ring->irq_lock);
+		mtx_unlock(&dev_priv->irq_lock);
 		ring->trace_irq_seqno = 0;
 	}
 }
@@ -2954,209 +3648,188 @@
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-		list_for_each_entry_safe(obj, next,
-		    &dev_priv->mm.deferred_free_list, mm_list)
-			i915_gem_free_object_tail(obj);
-	}
-
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(&dev_priv->rings[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_retire_requests_ring(ring);
 }
 
-static int
-sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+					struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 size = obj->gtt_space->size;
-	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj->gtt_offset + size - 4096) &
-			 0xfffff000) << 32;
-	val |= obj->gtt_offset & 0xfffff000;
-	val |= (uint64_t)((obj->stride / 128) - 1) <<
-		SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	if (obj) {
+		u32 size = obj->gtt_space->size;
 
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-	val |= I965_FENCE_REG_VALID;
+		val = (uint64_t)((obj->gtt_offset + size - 4096) &
+				 0xfffff000) << 32;
+		val |= obj->gtt_offset & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) <<
+			SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 6);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
-		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
-		intel_ring_emit(pipelined, (u32)val);
-		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
-		intel_ring_emit(pipelined, (u32)(val >> 32));
-		intel_ring_advance(pipelined);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
 	} else
-		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+		val = 0;
 
-	return 0;
+	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
 }
 
-static int
-i965_write_fence_reg(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 size = obj->gtt_space->size;
-	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj->gtt_offset + size - 4096) &
-		    0xfffff000) << 32;
-	val |= obj->gtt_offset & 0xfffff000;
-	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-	val |= I965_FENCE_REG_VALID;
+	if (obj) {
+		u32 size = obj->gtt_space->size;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 6);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
-		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
-		intel_ring_emit(pipelined, (u32)val);
-		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
-		intel_ring_emit(pipelined, (u32)(val >> 32));
-		intel_ring_advance(pipelined);
+		val = (uint64_t)((obj->gtt_offset + size - 4096) &
+				 0xfffff000) << 32;
+		val |= obj->gtt_offset & 0xfffff000;
+		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
 	} else
-		I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+		val = 0;
 
-	return 0;
+	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+	POSTING_READ(FENCE_REG_965_0 + reg * 8);
 }
 
-static int
-i915_write_fence_reg(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 size = obj->gtt_space->size;
-	u32 fence_reg, val, pitch_val;
-	int tile_width;
+	u32 val;
 
-	if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
-	    (size & -size) != size || (obj->gtt_offset & (size - 1))) {
-		printf(
-"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		 obj->gtt_offset, obj->map_and_fenceable, size);
-		return -EINVAL;
-	}
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		int pitch_val;
+		int tile_width;
 
-	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-		tile_width = 128;
-	else
-		tile_width = 512;
+		if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)))
+			printf(
+		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     obj->gtt_offset, obj->map_and_fenceable, size);
 
-	/* Note: pitch better be a power of two tile widths */
-	pitch_val = obj->stride / tile_width;
-	pitch_val = ffs(pitch_val) - 1;
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
 
-	val = obj->gtt_offset;
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I915_FENCE_SIZE_BITS(size);
-	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-	val |= I830_FENCE_REG_VALID;
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
 
-	fence_reg = obj->fence_reg;
-	if (fence_reg < 8)
-		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
 	else
-		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(pipelined, fence_reg);
-		intel_ring_emit(pipelined, val);
-		intel_ring_advance(pipelined);
-	} else
-		I915_WRITE(fence_reg, val);
-
-	return 0;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
 }
 
-static int
-i830_write_fence_reg(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 size = obj->gtt_space->size;
-	int regnum = obj->fence_reg;
 	uint32_t val;
-	uint32_t pitch_val;
 
-	if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
-	    (size & -size) != size || (obj->gtt_offset & (size - 1))) {
-		printf(
-"object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-		    obj->gtt_offset, size);
-		return -EINVAL;
-	}
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		uint32_t pitch_val;
 
-	pitch_val = obj->stride / 128;
-	pitch_val = ffs(pitch_val) - 1;
+		if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)))
+		    printf(
+		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+		     obj->gtt_offset, size);
 
-	val = obj->gtt_offset;
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I830_FENCE_SIZE_BITS(size);
-	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-	val |= I830_FENCE_REG_VALID;
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
-		intel_ring_emit(pipelined, val);
-		intel_ring_advance(pipelined);
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
 	} else
-		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+		val = 0;
 
-	return 0;
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
 }
 
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
 {
-	return i915_seqno_passed(ring->get_seqno(ring), seqno);
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+	case 5:
+	case 4: i965_write_fence_reg(dev, reg, obj); break;
+	case 3: i915_write_fence_reg(dev, reg, obj); break;
+	case 2: i830_write_fence_reg(dev, reg, obj); break;
+	default: break;
+	}
 }
 
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+}
+
 static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
 	if (obj->fenced_gpu_access) {
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->last_fenced_ring, 0,
-			    obj->base.write_domain);
+			ret = i915_gem_flush_ring(obj->ring,
+						  0, obj->base.write_domain);
 			if (ret)
 				return ret;
 		}
@@ -3164,18 +3837,13 @@
 		obj->fenced_gpu_access = false;
 	}
 
-	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
-		if (!ring_passed_seqno(obj->last_fenced_ring,
-				       obj->last_fenced_seqno)) {
-			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno,
-						true);
-			if (ret)
-				return ret;
-		}
+	if (obj->last_fenced_seqno) {
+		ret = i915_wait_request(obj->ring,
+					obj->last_fenced_seqno);
+		if (ret)
+			return ret;
 
 		obj->last_fenced_seqno = 0;
-		obj->last_fenced_ring = NULL;
 	}
 
 	/* Ensure that all CPU reads are completed before installing a fence
@@ -3190,35 +3858,29 @@
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	ret = i915_gem_object_flush_fence(obj, NULL);
+	ret = i915_gem_object_flush_fence(obj);
 	if (ret)
 		return ret;
 
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
 
-		if (dev_priv->fence_regs[obj->fence_reg].pin_count != 0)
-			printf("%s: pin_count %d\n", __func__,
-			    dev_priv->fence_regs[obj->fence_reg].pin_count);
-		i915_gem_clear_fence_reg(obj->base.dev,
-					 &dev_priv->fence_regs[obj->fence_reg]);
+	i915_gem_object_update_fence(obj,
+				     &dev_priv->fence_regs[obj->fence_reg],
+				     false);
+	i915_gem_object_fence_lost(obj);
 
-		obj->fence_reg = I915_FENCE_REG_NONE;
-	}
-
 	return 0;
 }
 
 static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev, struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *first, *avail;
+	struct drm_i915_fence_reg *reg, *avail;
 	int i;
 
 	/* First try to find a free reg */
@@ -3236,197 +3898,66 @@
 		return NULL;
 
 	/* None available, try to steal one or wait for a user to finish */
-	avail = first = NULL;
 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
 		if (reg->pin_count)
 			continue;
 
-		if (first == NULL)
-			first = reg;
-
-		if (!pipelined ||
-		    !reg->obj->last_fenced_ring ||
-		    reg->obj->last_fenced_ring == pipelined) {
-			avail = reg;
-			break;
-		}
+		return reg;
 	}
 
-	if (avail == NULL)
-		avail = first;
-
-	return avail;
+	return NULL;
 }
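
i915_find_fence_reg() is now a plain two-pass allocator: take a free register if one exists, otherwise steal the first unpinned entry on the fence LRU list (the list ordering supplies the "least recently used" part). In miniature, with an array and timestamps standing in for the LRU list:

/* Sketch of the two-pass fence-register allocation policy. */
#include <stdio.h>

#define NSLOTS 4

struct slot { int in_use; int pinned; int last_use; };

static int
find_slot(struct slot *s)
{
	int i, victim = -1;

	for (i = 0; i < NSLOTS; i++)		/* pass 1: a free slot */
		if (!s[i].in_use)
			return (i);
	for (i = 0; i < NSLOTS; i++)		/* pass 2: LRU, unpinned */
		if (!s[i].pinned &&
		    (victim < 0 || s[i].last_use < s[victim].last_use))
			victim = i;
	return (victim);			/* -1: all pinned (EDEADLK) */
}

int
main(void)
{
	struct slot s[NSLOTS] = {
		{ 1, 0, 10 }, { 1, 1, 2 }, { 1, 0, 5 }, { 1, 0, 7 },
	};

	printf("steal slot %d\n", find_slot(s));	/* slot 2: oldest unpinned */
	return (0);
}
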
 
 int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-    struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
 	struct drm_i915_fence_reg *reg;
 	int ret;
 
-	pipelined = NULL;
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_flush_fence(obj);
+		if (ret)
+			return ret;
+	}
+
 	ret = 0;
 
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		reg = &dev_priv->fence_regs[obj->fence_reg];
-		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
-		if (obj->tiling_changed) {
-			ret = i915_gem_object_flush_fence(obj, pipelined);
-			if (ret)
-				return ret;
-
-			if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
-				pipelined = NULL;
-
-			if (pipelined) {
-				reg->setup_seqno =
-					i915_gem_next_request_seqno(pipelined);
-				obj->last_fenced_seqno = reg->setup_seqno;
-				obj->last_fenced_ring = pipelined;
-			}
-
-			goto update;
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
 		}
+	} else if (enable) {
+		reg = i915_find_fence_reg(dev);
+		if (reg == NULL)
+			return -EDEADLK;
 
-		if (!pipelined) {
-			if (reg->setup_seqno) {
-				if (!ring_passed_seqno(obj->last_fenced_ring,
-				    reg->setup_seqno)) {
-					ret = i915_wait_request(
-					    obj->last_fenced_ring,
-					    reg->setup_seqno,
-					    true);
-					if (ret)
-						return ret;
-				}
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
 
-				reg->setup_seqno = 0;
-			}
-		} else if (obj->last_fenced_ring &&
-			   obj->last_fenced_ring != pipelined) {
-			ret = i915_gem_object_flush_fence(obj, pipelined);
+			ret = i915_gem_object_flush_fence(old);
 			if (ret)
 				return ret;
-		}
 
-		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
-			pipelined = NULL;
-		KASSERT(pipelined || reg->setup_seqno == 0, ("!pipelined"));
-
-		if (obj->tiling_changed) {
-			if (pipelined) {
-				reg->setup_seqno =
-					i915_gem_next_request_seqno(pipelined);
-				obj->last_fenced_seqno = reg->setup_seqno;
-				obj->last_fenced_ring = pipelined;
-			}
-			goto update;
+			i915_gem_object_fence_lost(old);
 		}
-
+	} else
 		return 0;
-	}
 
-	reg = i915_find_fence_reg(dev, pipelined);
-	if (reg == NULL)
-		return -EDEADLK;
+	i915_gem_object_update_fence(obj, reg, enable);
+	obj->fence_dirty = false;
 
-	ret = i915_gem_object_flush_fence(obj, pipelined);
-	if (ret)
-		return ret;
-
-	if (reg->obj) {
-		struct drm_i915_gem_object *old = reg->obj;
-
-		drm_gem_object_reference(&old->base);
-
-		if (old->tiling_mode)
-			i915_gem_release_mmap(old);
-
-		ret = i915_gem_object_flush_fence(old, pipelined);
-		if (ret) {
-			drm_gem_object_unreference(&old->base);
-			return ret;
-		}
-
-		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
-			pipelined = NULL;
-
-		old->fence_reg = I915_FENCE_REG_NONE;
-		old->last_fenced_ring = pipelined;
-		old->last_fenced_seqno =
-			pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
-		drm_gem_object_unreference(&old->base);
-	} else if (obj->last_fenced_seqno == 0)
-		pipelined = NULL;
-
-	reg->obj = obj;
-	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-	obj->fence_reg = reg - dev_priv->fence_regs;
-	obj->last_fenced_ring = pipelined;
-
-	reg->setup_seqno =
-		pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-	obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
-	obj->tiling_changed = false;
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		ret = sandybridge_write_fence_reg(obj, pipelined);
-		break;
-	case 5:
-	case 4:
-		ret = i965_write_fence_reg(obj, pipelined);
-		break;
-	case 3:
-		ret = i915_write_fence_reg(obj, pipelined);
-		break;
-	case 2:
-		ret = i830_write_fence_reg(obj, pipelined);
-		break;
-	}
-
-	return ret;
+	return 0;
 }
 
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev, struct drm_i915_fence_reg *reg)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t fence_reg = reg - dev_priv->fence_regs;
-
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
-		break;
-	case 5:
-	case 4:
-		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
-		break;
-	case 3:
-		if (fence_reg >= 8)
-			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
-		else
-	case 2:
-			fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
-		I915_WRITE(fence_reg, 0);
-		break;
-	}
-
-	list_del_init(&reg->lru_list);
-	reg->obj = NULL;
-	reg->setup_seqno = 0;
-	reg->pin_count = 0;
-}
-
 int
 i915_gem_init_object(struct drm_gem_object *obj)
 {
@@ -3439,7 +3970,7 @@
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
 
-	return (obj->gtt_space && !obj->active && obj->pin_count == 0);
+	return !obj->active;
 }
 
 static void
@@ -3447,6 +3978,7 @@
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	struct intel_ring_buffer *ring;
 	bool idle;
 	int i;
 
@@ -3468,7 +4000,7 @@
 	 * objects indefinitely.
 	 */
 	idle = true;
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for_each_ring(ring, dev_priv, i) {
 		struct intel_ring_buffer *ring = &dev_priv->rings[i];
 
 		if (!list_empty(&ring->gpu_write_list)) {
@@ -3523,7 +4055,7 @@
 
 	phys_obj->id = id;
 
-	phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
+	phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
 	if (phys_obj->handle == NULL) {
 		ret = -ENOMEM;
 		goto free_obj;
@@ -3582,13 +4114,13 @@
 	vaddr = obj->phys_obj->handle->vaddr;
 
 	page_count = obj->base.size / PAGE_SIZE;
-	VM_OBJECT_LOCK(obj->base.vm_obj);
+	VM_OBJECT_WLOCK(obj->base.vm_obj);
 	for (i = 0; i < page_count; i++) {
-		m = i915_gem_wire_page(obj->base.vm_obj, i);
+		m = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
 		if (m == NULL)
 			continue; /* XXX */
 
-		VM_OBJECT_UNLOCK(obj->base.vm_obj);
+		VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 		sf = sf_buf_alloc(m, 0);
 		if (sf != NULL) {
 			dst = (char *)sf_buf_kva(sf);
@@ -3597,7 +4129,7 @@
 		}
 		drm_clflush_pages(&m, 1);
 
-		VM_OBJECT_LOCK(obj->base.vm_obj);
+		VM_OBJECT_WLOCK(obj->base.vm_obj);
 		vm_page_reference(m);
 		vm_page_lock(m);
 		vm_page_dirty(m);
@@ -3605,7 +4137,7 @@
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
-	VM_OBJECT_UNLOCK(obj->base.vm_obj);
+	VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 	intel_gtt_chipset_flush();
 
 	obj->phys_obj->cur_obj = NULL;
@@ -3647,15 +4179,15 @@
 
 	page_count = obj->base.size / PAGE_SIZE;
 
-	VM_OBJECT_LOCK(obj->base.vm_obj);
+	VM_OBJECT_WLOCK(obj->base.vm_obj);
 	ret = 0;
 	for (i = 0; i < page_count; i++) {
-		m = i915_gem_wire_page(obj->base.vm_obj, i);
+		m = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
 		if (m == NULL) {
 			ret = -EIO;
 			break;
 		}
-		VM_OBJECT_UNLOCK(obj->base.vm_obj);
+		VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 		sf = sf_buf_alloc(m, 0);
 		src = (char *)sf_buf_kva(sf);
 		dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
@@ -3662,7 +4194,7 @@
 		memcpy(dst, src, PAGE_SIZE);
 		sf_buf_free(sf);
 
-		VM_OBJECT_LOCK(obj->base.vm_obj);
+		VM_OBJECT_WLOCK(obj->base.vm_obj);
 
 		vm_page_reference(m);
 		vm_page_lock(m);
@@ -3670,39 +4202,12 @@
 		vm_page_unlock(m);
 		atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 	}
-	VM_OBJECT_UNLOCK(obj->base.vm_obj);
+	VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 
-	return (0);
+	return (ret);
 }
 
 static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj,
-    uint64_t data_ptr, uint64_t offset, uint64_t size,
-    struct drm_file *file_priv)
-{
-	char *user_data, *vaddr;
-	int ret;
-
-	vaddr = (char *)obj->phys_obj->handle->vaddr + offset;
-	user_data = (char *)(uintptr_t)data_ptr;
-
-	if (copyin_nofault(user_data, vaddr, size) != 0) {
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		DRM_UNLOCK(dev);
-		ret = -copyin(user_data, vaddr, size);
-		DRM_LOCK(dev);
-		if (ret != 0)
-			return (ret);
-	}
-
-	intel_gtt_chipset_flush();
-	return (0);
-}
-
-static int
 i915_gpu_is_active(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv;
@@ -3759,7 +4264,7 @@
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev, true) == 0)
+		if (i915_gpu_idle(dev) == 0)
 			goto rescan;
 	}
 	DRM_UNLOCK(dev);

Modified: trunk/sys/dev/drm2/i915/i915_gem_evict.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_gem_evict.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_gem_evict.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008-2010 Intel Corporation
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_gem_evict.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem_evict.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -37,6 +38,9 @@
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+	if (obj->pin_count)
+		return false;
+
 	list_add(&obj->exec_list, unwind);
 	return drm_mm_scan_add_block(obj->gtt_space);
 }
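
mark_free() feeds drm_mm's eviction scan: candidates are added speculatively, and the scan reports success as soon as the accumulated neighbours would form a hole of the requested size (pinned objects are now rejected before they ever enter the scan). The accumulation step, reduced to a running total -- the real scan also checks adjacency and later rolls back candidates that are not part of the chosen hole:

/* Sketch of the eviction scan's accumulate-until-big-enough step. */
#include <stdbool.h>
#include <stdio.h>

#define NEEDED	3		/* pages required for the new object */

static int scanned;		/* pages accumulated by the scan so far */

static bool
scan_add_block(int pages)
{
	scanned += pages;
	return (scanned >= NEEDED);	/* true: a big-enough hole exists */
}

int
main(void)
{
	int sizes[] = { 1, 1, 2, 4 };	/* inactive objects, eviction order */
	int i;

	for (i = 0; i < 4; i++)
		if (scan_add_block(sizes[i]))
			break;
	printf("evicting %d objects frees %d pages\n", i + 1, scanned);
	return (0);
}
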
@@ -79,10 +83,10 @@
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
-					    alignment, 0,
+					    alignment, 0, 0,
 					    dev_priv->mm.gtt_mappable_end);
 	else
-		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, 0);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -93,7 +97,7 @@
 	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain || obj->pin_count)
+		if (obj->base.write_domain)
 			continue;
 
 		if (mark_free(obj, &unwind_list))
@@ -102,14 +106,11 @@
 
 	/* Finally add anything with a pending flush (in order of retirement) */
 	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (obj->pin_count)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain || obj->pin_count)
+		if (!obj->base.write_domain)
 			continue;
 
 		if (mark_free(obj, &unwind_list))
@@ -169,8 +170,9 @@
 i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	bool lists_empty;
 	int ret;
-	bool lists_empty;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
@@ -180,32 +182,25 @@
 
 	CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
 
-	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev, true);
+	/* The gpu_idle will flush everything in the write domain to the
+	 * active list. Then we must move everything off the active list
+	 * with retire requests.
+	 */
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		return ret;
 
+	i915_gem_retire_requests(dev);
+
 	KASSERT(list_empty(&dev_priv->mm.flushing_list),
 	    ("flush list not empty"));
 
-	return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
-
-	CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only);
-
+	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list, mm_list) {
 		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			int ret = i915_gem_object_unbind(obj);
-			if (ret)
-				return ret;
+			if (obj->pin_count == 0)
+				i915_gem_object_unbind(obj);
 		}
 	}
 

Modified: trunk/sys/dev/drm2/i915/i915_gem_execbuffer.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_gem_execbuffer.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_gem_execbuffer.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008,2010 Intel Corporation
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_gem_execbuffer.c 236933 2012-06-11 21:44:24Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem_execbuffer.c 290454 2015-11-06 16:48:33Z jhb $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -192,7 +193,7 @@
 		i915_gem_clflush_object(obj);
 
 	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
+		cd->flips |= atomic_load_acq_int(&obj->pending_flip);
 
 	/* The actual obj->write_domain will be updated with
 	 * pending_write_domain after we emit the accumulated flush for all
@@ -263,6 +264,12 @@
 	free(eb, DRM_I915_GEM);
 }
 
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		obj->cache_level != I915_CACHE_NONE);
+}
+
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
@@ -270,6 +277,7 @@
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
+	struct drm_i915_gem_object *target_i915_obj;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
@@ -278,7 +286,8 @@
 	if (unlikely(target_obj == NULL))
 		return -ENOENT;
 
-	target_offset = to_intel_bo(target_obj)->gtt_offset;
+	target_i915_obj = to_intel_bo(target_obj);
+	target_offset = target_i915_obj->gtt_offset;
 
 #if WATCH_RELOC
 	DRM_INFO("%s: obj %p offset %08x target %d "
@@ -364,12 +373,20 @@
 		return ret;
 	}
 
+	/* We can't wait for rendering with pagefaults disabled */
+	if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
+		return (-EFAULT);
+
 	reloc->delta += target_offset;
-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+	if (use_cpu_reloc(obj)) {
 		uint32_t page_offset = reloc->offset & PAGE_MASK;
 		char *vaddr;
 		struct sf_buf *sf;
 
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+		if (ret)
+			return ret;
+
 		sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
 		    SFB_NOWAIT);
 		if (sf == NULL)
@@ -381,13 +398,14 @@
 		uint32_t *reloc_entry;
 		char *reloc_page;
 
-		/* We can't wait for rendering with pagefaults disabled */
-		if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
-			return (-EFAULT);
-		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 		if (ret)
 			return ret;
 
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			return ret;
+
 		/*
 		 * Map the page containing the relocation we're going
 		 * to perform.
@@ -401,6 +419,16 @@
 		pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
 	}
 
+	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+	 * pipe_control writes because the gpu doesn't properly redirect them
+	 * through the ppgtt for non_secure batchbuffers. */
+	if (unlikely(IS_GEN6(dev) &&
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+	    !target_i915_obj->has_global_gtt_mapping)) {
+		i915_gem_gtt_bind_object(target_i915_obj,
+					 target_i915_obj->cache_level);
+	}
+
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
@@ -411,28 +439,44 @@
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
     struct eb_objects *eb)
 {
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry *user_relocs;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	struct drm_i915_gem_relocation_entry reloc;
-	int i, ret;
+	int remain, ret;
 
 	user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc));
-		if (ret != 0)
-			return (ret);
+	remain = entry->relocation_count;
+	while (remain) {
+		struct drm_i915_gem_relocation_entry *r = stack_reloc;
+		int count = remain;
+		if (count > DRM_ARRAY_SIZE(stack_reloc))
+			count = DRM_ARRAY_SIZE(stack_reloc);
+		remain -= count;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
+		ret = -copyin_nofault(user_relocs, r, count*sizeof(r[0]));
 		if (ret != 0)
 			return (ret);
 
-		ret = -copyout_nofault(&reloc.presumed_offset,
-		    &user_relocs[i].presumed_offset,
-		    sizeof(reloc.presumed_offset));
-		if (ret != 0)
-			return (ret);
+		do {
+			u64 offset = r->presumed_offset;
+
+			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+			if (ret)
+				return ret;
+
+			if (r->presumed_offset != offset &&
+			    copyout_nofault(&r->presumed_offset,
+					    &user_relocs->presumed_offset,
+					    sizeof(r->presumed_offset))) {
+				return -EFAULT;
+			}
+
+			user_relocs++;
+			r++;
+		} while (--count);
 	}
-
+#undef N_RELOC
 	return (0);
 }
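
The rewritten relocation loop copies user entries through a fixed 512-byte stack buffer instead of doing one copyin per entry, bounding both stack use and the number of fault windows. The same chunking pattern in plain userspace C (CHUNK and the summing body are stand-ins for the driver's copy and relocate steps):

/* Sketch: process a large user array through a small stack buffer. */
#include <stdio.h>
#include <string.h>

#define CHUNK 64

static long
process(const int *user, int remain)
{
	int stack[CHUNK];
	long sum = 0;

	while (remain) {
		int count = remain > CHUNK ? CHUNK : remain;

		memcpy(stack, user, count * sizeof(stack[0]));	/* the copyin */
		for (int i = 0; i < count; i++)
			sum += stack[i];	/* stand-in for one relocation */
		user += count;
		remain -= count;
	}
	return (sum);
}

int
main(void)
{
	int data[1000] = { 0 };

	printf("sum %ld\n", process(data, 1000));
	return (0);
}
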
 
@@ -487,6 +531,13 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<31)
 
 static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
 pin_and_fence_object(struct drm_i915_gem_object *obj,
 		     struct intel_ring_buffer *ring)
 {
@@ -499,8 +550,7 @@
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable =
-		entry->relocation_count ? true : need_fence;
+	need_mappable = need_fence || need_reloc_mappable(obj);
 
 	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
 	if (ret)
@@ -508,18 +558,13 @@
 
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			if (obj->tiling_mode) {
-				ret = i915_gem_object_get_fence(obj, ring);
-				if (ret)
-					goto err_unpin;
+			ret = i915_gem_object_get_fence(obj);
+			if (ret)
+				goto err_unpin;
 
+			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-				i915_gem_object_pin_fence(obj);
-			} else {
-				ret = i915_gem_object_put_fence(obj);
-				if (ret)
-					goto err_unpin;
-			}
+
 			obj->pending_fenced_gpu_access = true;
 		}
 	}
@@ -558,8 +603,7 @@
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable =
-			entry->relocation_count ? true : need_fence;
+		need_mappable = need_fence || need_reloc_mappable(obj);
 
 		if (need_mappable)
 			list_move(&obj->exec_list, &ordered_objects);
@@ -600,8 +644,7 @@
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable =
-				entry->relocation_count ? true : need_fence;
+			need_mappable = need_fence || need_reloc_mappable(obj);
 
 			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
@@ -815,63 +858,7 @@
 	return 0;
 }
 
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen < 6)
-		return 0;
-
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
-
-	/* Enable semaphores on SNB when IO remapping is off */
-	if (INTEL_INFO(dev)->gen == 6)
-		return !intel_iommu_enabled;
-
-	return 1;
-}
-
 static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *to)
-{
-	struct intel_ring_buffer *from = obj->ring;
-	u32 seqno;
-	int ret, idx;
-
-	if (from == NULL || to == from)
-		return 0;
-
-	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (!intel_enable_semaphores(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
-
-	idx = intel_ring_sync_index(from, to);
-
-	seqno = obj->last_rendering_seqno;
-	if (seqno <= from->sync_seqno[idx])
-		return 0;
-
-	if (seqno == from->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = malloc(sizeof(*request), DRM_I915_GEM,
-		    M_WAITOK | M_ZERO);
-		ret = i915_add_request(from, NULL, request);
-		if (ret) {
-			free(request, DRM_I915_GEM);
-			return ret;
-		}
-
-		seqno = request->seqno;
-	}
-
-	from->sync_seqno[idx] = seqno;
-
-	return to->sync_to(to, from, seqno - 1);
-}
-
-static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
 	u32 plane, flip_mask;
@@ -937,7 +924,7 @@
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
-		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
 	}
@@ -953,7 +940,7 @@
 
 static int
 validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
-    vm_page_t ***map)
+    vm_page_t ***map, int **maplen)
 {
 	vm_page_t *ma;
 	int i, length, page_count;
@@ -960,6 +947,8 @@
 
 	/* XXXKIB various limits checking is missing there */
 	*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
+	*maplen = malloc(count * sizeof(*maplen), DRM_I915_GEM, M_WAITOK |
+	    M_ZERO);
 	for (i = 0; i < count; i++) {
 		/* First check for malicious input causing overflow */
 		if (exec[i].relocation_count >
@@ -981,9 +970,10 @@
 		page_count = howmany(length, PAGE_SIZE) + 2;
 		ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
 		    DRM_I915_GEM, M_WAITOK | M_ZERO);
-		if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
-		    exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE,
-		    ma, page_count) == -1) {
+		(*maplen)[i] = vm_fault_quick_hold_pages(
+		    &curproc->p_vmspace->vm_map, exec[i].relocs_ptr, length,
+		    VM_PROT_READ | VM_PROT_WRITE, ma, page_count);
+		if ((*maplen)[i] == -1) {
 			free(ma, DRM_I915_GEM);
 			(*map)[i] = NULL;
 			return (-EFAULT);
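
validate_exec_list() guards the relocation sizing against malicious counts before multiplying; the canonical form of that guard is shown below (the struct layout is illustrative, not the uapi one):

/* Sketch: reject counts that would overflow count * sizeof(entry). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reloc { uint64_t target_handle; uint64_t offset; uint64_t delta; };

static void *
alloc_relocs(size_t count)
{
	if (count > SIZE_MAX / sizeof(struct reloc))
		return (NULL);		/* would overflow: malicious input */
	return (malloc(count * sizeof(struct reloc)));
}

int
main(void)
{
	printf("%p\n", alloc_relocs(SIZE_MAX / 2));	/* rejected: NULL */
	return (0);
}
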
@@ -1015,11 +1005,14 @@
 			obj->pending_gpu_write = true;
 			list_move_tail(&obj->gpu_write_list,
 				       &ring->gpu_write_list);
-			intel_mark_busy(ring->dev, obj);
+			if (obj->pin_count) /* check for potential scanout */
+				intel_mark_busy(ring->dev, obj);
 		}
 		CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
 		    obj, old_read, old_write);
 	}
+
+	intel_mark_busy(ring->dev, NULL);
 }
 
 int i915_gem_sync_exec_requests;
@@ -1051,8 +1044,10 @@
 	if (request == NULL || i915_add_request(ring, file, request)) {
 		i915_gem_next_request_seqno(ring);
 		free(request, DRM_I915_GEM);
-	} else if (i915_gem_sync_exec_requests)
-		i915_wait_request(ring, request->seqno, true);
+	} else if (i915_gem_sync_exec_requests) {
+		i915_wait_request(ring, request->seqno);
+		i915_gem_retire_requests(dev);
+	}
 }
 
 static void
@@ -1130,6 +1125,8 @@
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
 	vm_page_t **relocs_ma;
+	int *relocs_len;
+	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 seqno;
 	u32 mask;
@@ -1143,7 +1140,8 @@
 	if (args->batch_len == 0)
 		return (0);
 
-	ret = validate_exec_list(exec, args->buffer_count, &relocs_ma);
+	ret = validate_exec_list(exec, args->buffer_count, &relocs_ma,
+	    &relocs_len);
 	if (ret != 0)
 		goto pre_struct_lock_err;
 
@@ -1153,18 +1151,22 @@
 		ring = &dev_priv->rings[RCS];
 		break;
 	case I915_EXEC_BSD:
-		if (!HAS_BSD(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BSD)\n");
-			return -EINVAL;
+		ring = &dev_priv->rings[VCS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			ret = -EPERM;
+			goto pre_struct_lock_err;
 		}
-		ring = &dev_priv->rings[VCS];
 		break;
 	case I915_EXEC_BLT:
-		if (!HAS_BLT(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BLT)\n");
-			return -EINVAL;
+		ring = &dev_priv->rings[BCS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			ret = -EPERM;
+			goto pre_struct_lock_err;
 		}
-		ring = &dev_priv->rings[BCS];
 		break;
 	default:
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1172,6 +1174,12 @@
 		ret = -EINVAL;
 		goto pre_struct_lock_err;
 	}
+	if (!intel_ring_initialized(ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		ret = -EINVAL;
+		goto pre_struct_lock_err;
+	}
 
 	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	mask = I915_EXEC_CONSTANTS_MASK;
@@ -1216,6 +1224,12 @@
 			goto pre_struct_lock_err;
 		}
 
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			ret = -EINVAL;
+			goto pre_struct_lock_err;
+		}
+
 		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
 			DRM_DEBUG("execbuf with %u cliprects\n",
 				  args->num_cliprects);
@@ -1235,14 +1249,16 @@
 		goto pre_struct_lock_err;
 
 	if (dev_priv->mm.suspended) {
+		DRM_UNLOCK(dev);
 		ret = -EBUSY;
-		goto struct_lock_err;
+		goto pre_struct_lock_err;
 	}
 
 	eb = eb_create(args->buffer_count);
 	if (eb == NULL) {
+		DRM_UNLOCK(dev);
 		ret = -ENOMEM;
-		goto struct_lock_err;
+		goto pre_struct_lock_err;
 	}
 
 	/* Look up object handles */
@@ -1306,6 +1322,10 @@
 	if (ret)
 		goto err;
 
+	ret = i915_switch_context(ring, file, ctx_id);
+	if (ret)
+		goto err;
+
 	seqno = i915_gem_next_request_seqno(ring);
 	for (i = 0; i < I915_NUM_RINGS - 1; i++) {
 		if (seqno < ring->sync_seqno[i]) {
@@ -1313,9 +1333,10 @@
 			 * so every billion or so execbuffers, we need to stall
 			 * the GPU in order to reset the counters.
 			 */
-			ret = i915_gpu_idle(dev, true);
+			ret = i915_gpu_idle(dev);
 			if (ret)
 				goto err;
+			i915_gem_retire_requests(dev);
 
 			KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno"));
 		}
@@ -1384,19 +1405,16 @@
 		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
-struct_lock_err:
 	DRM_UNLOCK(dev);
 
 pre_struct_lock_err:
 	for (i = 0; i < args->buffer_count; i++) {
 		if (relocs_ma[i] != NULL) {
-			vm_page_unhold_pages(relocs_ma[i], howmany(
-			    exec[i].relocation_count *
-			    sizeof(struct drm_i915_gem_relocation_entry),
-			    PAGE_SIZE));
+			vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
 			free(relocs_ma[i], DRM_I915_GEM);
 		}
 	}
+	free(relocs_len, DRM_I915_GEM);
 	free(relocs_ma, DRM_I915_GEM);
 	free(cliprects, DRM_I915_GEM);
 	return ret;
@@ -1461,6 +1479,7 @@
 	exec2.num_cliprects = args->num_cliprects;
 	exec2.cliprects_ptr = args->cliprects_ptr;
 	exec2.flags = I915_EXEC_RENDER;
+	i915_execbuffer2_set_context_id(exec2, 0);
 
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {

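Note on the execbuffer hunks above: validate_exec_list() now records the page count returned by vm_fault_quick_hold_pages() for each entry, so the unwind path can vm_page_unhold_pages() exactly what was wired instead of recomputing it from relocation_count; ring selection is validated via intel_ring_initialized(), and nonzero context ids are rejected on the BSD/BLT rings. Below is a minimal userspace sketch of the hold/record/unhold bookkeeping pattern only, with hold_pages()/unhold_pages() as hypothetical stand-ins for the kernel calls:

#include <stdio.h>
#include <stdlib.h>

/* Pretend entry 2 faults; everything else wires 4 pages. */
static int hold_pages(int idx) { return (idx == 2) ? -1 : 4; }
static void unhold_pages(int held) { printf("unheld %d pages\n", held); }

int main(void)
{
	int count = 4, i;
	int *maplen = calloc(count, sizeof(*maplen));

	if (maplen == NULL)
		return 1;
	for (i = 0; i < count; i++) {
		maplen[i] = hold_pages(i);
		if (maplen[i] == -1)
			goto err;	/* unwind with the exact recorded counts */
	}
err:
	for (i = 0; i < count; i++)
		if (maplen[i] > 0)	/* -1 and never-reached entries are skipped */
			unhold_pages(maplen[i]);
	free(maplen);
	return 0;
}
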
Modified: trunk/sys/dev/drm2/i915/i915_gem_gtt.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_gem_gtt.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_gem_gtt.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2010 Daniel Vetter
  *
@@ -23,7 +24,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_gem_gtt.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem_gtt.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -245,7 +246,7 @@
 
 	if (dev_priv->mm.gtt.do_idle_maps) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev, false)) {
+		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			DELAY(10);
@@ -277,7 +278,7 @@
 
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		i915_gem_clflush_object(obj);
-		i915_gem_gtt_rebind_object(obj, obj->cache_level);
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
@@ -284,18 +285,14 @@
 }
 
 int
-i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	unsigned int agp_type;
 
-	agp_type = cache_level_to_agp_type(obj->base.dev, obj->cache_level);
-	intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-	    obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
 	return (0);
 }
 
 void
-i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
     enum i915_cache_level cache_level)
 {
 	struct drm_device *dev;
@@ -308,11 +305,23 @@
 
 	intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
 	    obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
+
+	obj->has_global_gtt_mapping = 1;
 }
 
 void
 i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
+
+	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+	    obj->base.size >> PAGE_SHIFT);
+
+	obj->has_global_gtt_mapping = 0;
+}
+
+void
+i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible;
@@ -322,8 +331,35 @@
 
 	interruptible = do_idling(dev_priv);
 
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
-	    obj->base.size >> PAGE_SHIFT);
-
 	undo_idling(dev_priv, interruptible);
 }
+
+int
+i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
+    unsigned long mappable_end, unsigned long end)
+{
+	drm_i915_private_t *dev_priv;
+	unsigned long mappable;
+	int error;
+
+	dev_priv = dev->dev_private;
+	mappable = min(end, mappable_end) - start;
+
+	/* Subtract the guard page ... */
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
+	dev_priv->mm.gtt_total = end - start;
+	dev_priv->mm.mappable_gtt_total = mappable;
+
+	/* ... but ensure that we clear the entire range. */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	device_printf(dev->dev,
+	    "taking over the fictitious range 0x%lx-0x%lx\n",
+	    dev->agp->base + start, dev->agp->base + start + mappable);
+	error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
+	    dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
+	return (error);
+}

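Note on the i915_gem_gtt.c hunks: binding is split into prepare/bind/unbind/finish steps, unbind now clears the GTT range itself, and the new i915_gem_init_global_gtt() hands the aperture to drm_mm minus a trailing guard page while registering only the mappable slice as write-combining fictitious memory. A small standalone sketch of that range arithmetic (the sizes are made up for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long start = 0, mappable_end = 256UL << 20, end = 512UL << 20;
	unsigned long mappable = ((end < mappable_end) ? end : mappable_end) - start;
	unsigned long mm_size = end - start - PAGE_SIZE;	/* guard page held back */

	printf("drm_mm manages %lu bytes; mappable WC slice is %lu bytes\n",
	    mm_size, mappable);
	return 0;
}
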
Modified: trunk/sys/dev/drm2/i915/i915_gem_tiling.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_gem_tiling.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_gem_tiling.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008 Intel Corporation
  *
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_gem_tiling.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_gem_tiling.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -352,16 +353,24 @@
 		}
 	}
 
+	DRM_LOCK(dev);
 	if (args->tiling_mode != obj->tiling_mode ||
 	    args->stride != obj->stride) {
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
-		 * need to ensure that any fence register is cleared.
+		 * need to ensure that any fence register is updated before
+		 * the next fenced (either through the GTT or by the BLT unit
+		 * on older GPUs) access.
+		 *
+		 * After updating the tiling parameters, we then flag whether
+		 * we need to update an associated fence register. Note this
+		 * has to also include the unfenced register the GPU uses
+		 * whilst executing a fenced command for an untiled object.
 		 */
-		i915_gem_release_mmap(obj);
 
-		obj->map_and_fenceable = obj->gtt_space == NULL ||
+		obj->map_and_fenceable =
+			obj->gtt_space == NULL ||
 		    (obj->gtt_offset + obj->base.size <=
 		    dev_priv->mm.gtt_mappable_end &&
 		    i915_gem_object_fence_ok(obj, args->tiling_mode));
@@ -374,16 +383,25 @@
 			if (obj->gtt_offset & (unfenced_alignment - 1))
 				ret = i915_gem_object_unbind(obj);
 		}
+
 		if (ret == 0) {
-			obj->tiling_changed = true;
+			obj->fence_dirty =
+				obj->fenced_gpu_access ||
+				obj->fence_reg != I915_FENCE_REG_NONE;
+
+
 			obj->tiling_mode = args->tiling_mode;
 			obj->stride = args->stride;
+
+			/* Force the fence to be reacquired for GTT access */
+			i915_gem_release_mmap(obj);
 		}
- 	}
+	}
 	/* we have to maintain this existing ABI... */
 	args->stride = obj->stride;
 	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
+	DRM_UNLOCK(dev);
 
 	return (ret);
 }
@@ -403,6 +421,8 @@
 	if (&obj->base == NULL)
 		return -ENOENT;
 
+	DRM_LOCK(dev);
+
 	args->tiling_mode = obj->tiling_mode;
 	switch (obj->tiling_mode) {
 	case I915_TILING_X:
@@ -425,6 +445,7 @@
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
 	drm_gem_object_unreference(&obj->base);
+	DRM_UNLOCK(dev);
 
 	return 0;
 }
@@ -456,6 +477,22 @@
 }
 
 void
+i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
+    vm_page_t m)
+{
+	char new_bit_17;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	new_bit_17 = VM_PAGE_TO_PHYS(m) >> 17;
+	if ((new_bit_17 & 0x1) != (test_bit(m->pindex, obj->bit_17) != 0)) {
+		i915_gem_swizzle_page(m);
+		vm_page_dirty(m);
+	}
+}
+
+void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size >> PAGE_SHIFT;

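Note on the tiling hunks: set_tiling/get_tiling now run under DRM_LOCK, the old tiling_changed flag becomes fence_dirty (set whenever a fence register or fenced GPU access must be refreshed), and mmaps are released only after the tiling parameters are updated. The new per-page helper re-swizzles a page whose physical bit 17 no longer matches the recorded value; a standalone sketch of just that test follows, with illustrative inputs:

#include <stdbool.h>
#include <stdio.h>

/* Bit 17 of the page's physical address, compared with the recorded value. */
static bool needs_swizzle(unsigned long long phys, bool recorded)
{
	bool now = (phys >> 17) & 0x1;

	return now != recorded;
}

int main(void)
{
	printf("%d\n", needs_swizzle(1ULL << 17, false));	/* 1: bit flipped */
	printf("%d\n", needs_swizzle(1ULL << 16, false));	/* 0: unchanged */
	return 0;
}
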
Modified: trunk/sys/dev/drm2/i915/i915_ioc32.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_ioc32.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_ioc32.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (C) Paul Mackerras 2005
  * Copyright (C) Alan Hourihane 2005
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_ioc32.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include "opt_compat.h"
 
@@ -127,12 +128,12 @@
 	u32 region_offset;	/* offset from start of fb or agp */
 } drm_i915_mem_alloc32_t;
 
-drm_ioctl_desc_t i915_compat_ioctls[] = {
+struct drm_ioctl_desc i915_compat_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, compat_i915_batchbuffer, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, compat_i915_cmdbuffer, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GETPARAM, compat_i915_getparam, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, compat_i915_irq_emit, DRM_AUTH)
 };
-int i915_compat_ioctls_nr = DRM_ARRAY_SIZE(i915_compat_ioctls);
+int i915_compat_ioctls_nr = ARRAY_SIZE(i915_compat_ioctls);
 
 #endif


Property changes on: trunk/sys/dev/drm2/i915/i915_ioc32.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
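
Note on the i915_ioc32.c hunk: it swaps the old drm_ioctl_desc_t typedef for struct drm_ioctl_desc and the DRM_ARRAY_SIZE() macro for the generic ARRAY_SIZE(). The latter is the usual element-count macro; a local equivalent for illustration (the real definition lives in the drm headers):

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	int ioctls[4];

	printf("%zu entries\n", ARRAY_SIZE(ioctls));	/* prints "4 entries" */
	return 0;
}
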
Modified: trunk/sys/dev/drm2/i915/i915_irq.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_irq.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_irq.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  */
 /*-
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_irq.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_irq.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -36,37 +37,11 @@
 #include <dev/drm2/i915/intel_drv.h>
 #include <sys/sched.h>
 #include <sys/sf_buf.h>
+#include <sys/sleepqueue.h>
 
 static void i915_capture_error_state(struct drm_device *dev);
 static u32 ring_last_seqno(struct intel_ring_buffer *ring);
 
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX			\
-	(I915_ASLE_INTERRUPT |				\
-	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
-	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
-	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
-	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
-	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
-
-#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
-				 PIPE_VBLANK_INTERRUPT_STATUS)
-
-#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
-				 PIPE_VBLANK_INTERRUPT_ENABLE)
-
-#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
-					 DRM_I915_VBLANK_PIPE_B)
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -120,6 +95,10 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	/* FIXME: opregion/asle for VLV */
+	if (IS_VALLEYVIEW(dev))
+		return;
+
 	mtx_lock(&dev_priv->irq_lock);
 
 	if (HAS_PCH_SPLIT(dev))
@@ -368,18 +347,16 @@
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno;
 
 	if (ring->obj == NULL)
 		return;
 
-	seqno = ring->get_seqno(ring);
-	CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno);
+	CTR2(KTR_DRM, "request_complete %s %d", ring->name,
+	    ring->get_seqno(ring));
 
-	mtx_lock(&ring->irq_lock);
-	ring->irq_seqno = seqno;
+	mtx_lock(&dev_priv->irq_lock);
 	wakeup(ring);
-	mtx_unlock(&ring->irq_lock);
+	mtx_unlock(&dev_priv->irq_lock);
 
 	if (i915_enable_hangcheck) {
 		dev_priv->hangcheck_count = 0;
@@ -445,14 +422,141 @@
 	DRM_UNLOCK(dev);
 }
 
-static void pch_irq_handler(struct drm_device *dev)
+static void snb_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
 {
+
+	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+		notify_ring(dev, &dev_priv->rings[RCS]);
+	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->rings[VCS]);
+	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->rings[BCS]);
+
+	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_ERROR_INTERRUPT)) {
+		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+		i915_handle_error(dev, false);
+	}
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+				u32 pm_iir)
+{
+
+	/*
+	 * IIR bits should never already be set because IMR should
+	 * prevent an interrupt from being shown in IIR. The warning
+	 * displays a case where we've unsafely cleared
+	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * type is not a problem, it displays a problem in the logic.
+	 *
+	 * The mask bit in IMR is cleared by rps_work.
+	 */
+
+	mtx_lock(&dev_priv->rps_lock);
+	if (dev_priv->pm_iir & pm_iir)
+		printf("Missed a PM interrupt\n");
+	dev_priv->pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	POSTING_READ(GEN6_PMIMR);
+	mtx_unlock(&dev_priv->rps_lock);
+
+	taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+}
+
+static void valleyview_irq_handler(void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 pch_iir;
+	u32 iir, gt_iir, pm_iir;
 	int pipe;
+	u32 pipe_stats[I915_MAX_PIPES];
+	u32 vblank_status;
+	int vblank = 0;
+	bool blc_event;
 
-	pch_iir = I915_READ(SDEIIR);
+	atomic_inc(&dev_priv->irq_received);
 
+	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
+		PIPE_VBLANK_INTERRUPT_STATUS;
+
+	while (true) {
+		iir = I915_READ(VLV_IIR);
+		gt_iir = I915_READ(GTIIR);
+		pm_iir = I915_READ(GEN6_PMIIR);
+
+		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+			goto out;
+
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+		mtx_lock(&dev_priv->irq_lock);
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+			}
+		}
+		mtx_unlock(&dev_priv->irq_lock);
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+					 hotplug_status);
+			if (hotplug_status & dev_priv->hotplug_supported_mask)
+				taskqueue_enqueue(dev_priv->tq,
+				    &dev_priv->hotplug_task);
+
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			I915_READ(PORT_HOTPLUG_STAT);
+		}
+
+
+		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
+			drm_handle_vblank(dev, 0);
+			vblank++;
+			intel_finish_page_flip(dev, 0);
+		}
+
+		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
+			drm_handle_vblank(dev, 1);
+			vblank++;
+			intel_finish_page_flip(dev, 0);
+		}
+
+		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+			blc_event = true;
+
+		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+			gen6_queue_rps_work(dev_priv, pm_iir);
+
+		I915_WRITE(GTIIR, gt_iir);
+		I915_WRITE(GEN6_PMIIR, pm_iir);
+		I915_WRITE(VLV_IIR, iir);
+	}
+
+out:;
+}
+
+static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
 	if (pch_iir & SDE_AUDIO_POWER_MASK)
 		DRM_DEBUG("i915: PCH audio power change on port %d\n",
 				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -493,10 +597,8 @@
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
-#if 0
-	struct drm_i915_master_private *master_priv;
-#endif
+	u32 de_iir, gt_iir, de_ier, pm_iir;
+	int i;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -505,90 +607,66 @@
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 	POSTING_READ(DEIER);
 
-	de_iir = I915_READ(DEIIR);
-	gt_iir = I915_READ(GTIIR);
-	pch_iir = I915_READ(SDEIIR);
-	pm_iir = I915_READ(GEN6_PMIIR);
-
-	CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir,
-	    gt_iir, pch_iir, pm_iir);
-
-	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
-		goto done;
-
-#if 0
-	if (dev->primary->master) {
-		master_priv = dev->primary->master->driver_priv;
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->last_dispatch =
-				READ_BREADCRUMB(dev_priv);
+ 	gt_iir = I915_READ(GTIIR);
+	if (gt_iir) {
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+		I915_WRITE(GTIIR, gt_iir);
 	}
-#else
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->last_dispatch =
-		    READ_BREADCRUMB(dev_priv);
-#endif
 
-	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
-		notify_ring(dev, &dev_priv->rings[RCS]);
-	if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->rings[VCS]);
-	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->rings[BCS]);
+	de_iir = I915_READ(DEIIR);
+	if (de_iir) {
+		if (de_iir & DE_GSE_IVB)
+			intel_opregion_gse_intr(dev);
 
-	if (de_iir & DE_GSE_IVB) {
-#if 1
-		KIB_NOTYET();
-#else
-		intel_opregion_gse_intr(dev);
-#endif
-	}
+		for (i = 0; i < 3; i++) {
+			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+				intel_prepare_page_flip(dev, i);
+				intel_finish_page_flip_plane(dev, i);
+			}
+			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+				drm_handle_vblank(dev, i);
+		}
 
-	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
-		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip_plane(dev, 0);
-	}
+		/* check event from PCH */
+		if (de_iir & DE_PCH_EVENT_IVB) {
+			u32 pch_iir = I915_READ(SDEIIR);
 
-	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
-		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip_plane(dev, 1);
-	}
+			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+				taskqueue_enqueue(dev_priv->tq,
+				    &dev_priv->hotplug_task);
+			pch_irq_handler(dev, pch_iir);
 
-	if (de_iir & DE_PIPEA_VBLANK_IVB)
-		drm_handle_vblank(dev, 0);
+			/* clear PCH hotplug event before clear CPU irq */
+			I915_WRITE(SDEIIR, pch_iir);
+		}
 
-	if (de_iir & DE_PIPEB_VBLANK_IVB)
-		drm_handle_vblank(dev, 1);
-
-	/* check event from PCH */
-	if (de_iir & DE_PCH_EVENT_IVB) {
-		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
-			taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
-		pch_irq_handler(dev);
+		I915_WRITE(DEIIR, de_iir);
 	}
 
-	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-		mtx_lock(&dev_priv->rps_lock);
-		if ((dev_priv->pm_iir & pm_iir) != 0)
-			printf("Missed a PM interrupt\n");
-		dev_priv->pm_iir |= pm_iir;
-		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-		POSTING_READ(GEN6_PMIMR);
-		mtx_unlock(&dev_priv->rps_lock);
-		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+	pm_iir = I915_READ(GEN6_PMIIR);
+	if (pm_iir) {
+		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+			gen6_queue_rps_work(dev_priv, pm_iir);
+		I915_WRITE(GEN6_PMIIR, pm_iir);
 	}
 
-	/* should clear PCH hotplug event before clear CPU irq */
-	I915_WRITE(SDEIIR, pch_iir);
-	I915_WRITE(GTIIR, gt_iir);
-	I915_WRITE(DEIIR, de_iir);
-	I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
+
+	CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir,
+	    gt_iir, pm_iir);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->rings[RCS]);
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->rings[VCS]);
+}
+
 static void
 ironlake_irq_handler(void *arg)
 {
@@ -596,16 +674,9 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 	u32 hotplug_mask;
-#if 0
-	struct drm_i915_master_private *master_priv;
-#endif
-	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
 
 	atomic_inc(&dev_priv->irq_received);
 
-	if (IS_GEN6(dev))
-		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -628,32 +699,13 @@
 	else
 		hotplug_mask = SDE_HOTPLUG_MASK;
 
-#if 0
-	if (dev->primary->master) {
-		master_priv = dev->primary->master->driver_priv;
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->last_dispatch =
-				READ_BREADCRUMB(dev_priv);
-	}
-#else
-		if (dev_priv->sarea_priv)
-			dev_priv->sarea_priv->last_dispatch =
-			    READ_BREADCRUMB(dev_priv);
-#endif
+	if (IS_GEN5(dev))
+		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+	else
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
-	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
-		notify_ring(dev, &dev_priv->rings[RCS]);
-	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->rings[VCS]);
-	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->rings[BCS]);
-
 	if (de_iir & DE_GSE) {
-#if 1
-		KIB_NOTYET();
-#else
 		intel_opregion_gse_intr(dev);
-#endif
 	}
 
 	if (de_iir & DE_PLANEA_FLIP_DONE) {
@@ -677,7 +729,7 @@
 		if (pch_iir & hotplug_mask)
 			taskqueue_enqueue(dev_priv->tq,
 			    &dev_priv->hotplug_task);
-		pch_irq_handler(dev);
+		pch_irq_handler(dev, pch_iir);
 	}
 
 	if (de_iir & DE_PCU_EVENT) {
@@ -685,16 +737,8 @@
 		i915_handle_rps_change(dev);
 	}
 
-	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-		mtx_lock(&dev_priv->rps_lock);
-		if ((dev_priv->pm_iir & pm_iir) != 0)
-			printf("Missed a PM interrupt\n");
-		dev_priv->pm_iir |= pm_iir;
-		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-		POSTING_READ(GEN6_PMIMR);
-		mtx_unlock(&dev_priv->rps_lock);
-		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
-	}
+	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		gen6_queue_rps_work(dev_priv, pm_iir);
 
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
@@ -725,7 +769,7 @@
 	if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
 		DRM_DEBUG("i915: resetting chip\n");
 		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
-		if (!i915_reset(dev, GRDOM_RENDER)) {
+		if (!i915_reset(dev)) {
 			atomic_store_rel_int(&dev_priv->mm.wedged, 0);
 			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
 		}
@@ -736,6 +780,8 @@
 	}
 }
 
+#define pr_err(...) printf(__VA_ARGS__)
+
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -751,26 +797,20 @@
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
-			printf("  IPEIR: 0x%08x\n",
-			       I915_READ(IPEIR_I965));
-			printf("  IPEHR: 0x%08x\n",
-			       I915_READ(IPEHR_I965));
-			printf("  INSTDONE: 0x%08x\n",
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			pr_err("  INSTDONE: 0x%08x\n",
 			       I915_READ(INSTDONE_I965));
-			printf("  INSTPS: 0x%08x\n",
-			       I915_READ(INSTPS));
-			printf("  INSTDONE1: 0x%08x\n",
-			       I915_READ(INSTDONE1));
-			printf("  ACTHD: 0x%08x\n",
-			       I915_READ(ACTHD_I965));
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
 		}
 		if (eir & GM45_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
-			printf("page table error\n");
-			printf("  PGTBL_ER: 0x%08x\n",
-			       pgtbl_err);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
 			POSTING_READ(PGTBL_ER);
 		}
@@ -779,9 +819,8 @@
 	if (!IS_GEN2(dev)) {
 		if (eir & I915_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
-			printf("page table error\n");
-			printf("  PGTBL_ER: 0x%08x\n",
-			       pgtbl_err);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
 			POSTING_READ(PGTBL_ER);
 		}
@@ -788,44 +827,34 @@
 	}
 
 	if (eir & I915_ERROR_MEMORY_REFRESH) {
-		printf("memory refresh error:\n");
+		pr_err("memory refresh error:\n");
 		for_each_pipe(pipe)
-			printf("pipe %c stat: 0x%08x\n",
+			pr_err("pipe %c stat: 0x%08x\n",
 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
 		/* pipestat has already been acked */
 	}
 	if (eir & I915_ERROR_INSTRUCTION) {
-		printf("instruction error\n");
-		printf("  INSTPM: 0x%08x\n",
-		       I915_READ(INSTPM));
+		pr_err("instruction error\n");
+		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
 		if (INTEL_INFO(dev)->gen < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
-			printf("  IPEIR: 0x%08x\n",
-			       I915_READ(IPEIR));
-			printf("  IPEHR: 0x%08x\n",
-			       I915_READ(IPEHR));
-			printf("  INSTDONE: 0x%08x\n",
-			       I915_READ(INSTDONE));
-			printf("  ACTHD: 0x%08x\n",
-			       I915_READ(ACTHD));
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
+			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
 			POSTING_READ(IPEIR);
 		} else {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
-			printf("  IPEIR: 0x%08x\n",
-			       I915_READ(IPEIR_I965));
-			printf("  IPEHR: 0x%08x\n",
-			       I915_READ(IPEHR_I965));
-			printf("  INSTDONE: 0x%08x\n",
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			pr_err("  INSTDONE: 0x%08x\n",
 			       I915_READ(INSTDONE_I965));
-			printf("  INSTPS: 0x%08x\n",
-			       I915_READ(INSTPS));
-			printf("  INSTDONE1: 0x%08x\n",
-			       I915_READ(INSTDONE1));
-			printf("  ACTHD: 0x%08x\n",
-			       I915_READ(ACTHD_I965));
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
 		}
@@ -858,6 +887,8 @@
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
 
 	i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
@@ -872,19 +903,11 @@
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		mtx_lock(&dev_priv->rings[RCS].irq_lock);
-		wakeup(&dev_priv->rings[RCS]);
-		mtx_unlock(&dev_priv->rings[RCS].irq_lock);
-		if (HAS_BSD(dev)) {
-			mtx_lock(&dev_priv->rings[VCS].irq_lock);
-			wakeup(&dev_priv->rings[VCS]);
-			mtx_unlock(&dev_priv->rings[VCS].irq_lock);
+		for_each_ring(ring, dev_priv, i) {
+			mtx_lock(&dev_priv->irq_lock);
+			wakeup(ring);
+			mtx_unlock(&dev_priv->irq_lock);
 		}
-		if (HAS_BLT(dev)) {
-			mtx_lock(&dev_priv->rings[BCS].irq_lock);
-			wakeup(&dev_priv->rings[BCS]);
-			mtx_unlock(&dev_priv->rings[BCS].irq_lock);
-		}
 	}
 
 	taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
@@ -916,7 +939,8 @@
 	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = DSPSURF(intel_crtc->plane);
-		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+					obj->gtt_offset;
 	} else {
 		int dspaddr = DSPADDR(intel_crtc->plane);
 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -932,292 +956,6 @@
 	}
 }
 
-static void
-i915_driver_irq_handler(void *arg)
-{
-	struct drm_device *dev = (struct drm_device *)arg;
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
-#if 0
-	struct drm_i915_master_private *master_priv;
-#endif
-	u32 iir, new_iir;
-	u32 pipe_stats[I915_MAX_PIPES];
-	u32 vblank_status;
-	int vblank = 0;
-	int irq_received;
-	int pipe;
-	bool blc_event = false;
-
-	atomic_inc(&dev_priv->irq_received);
-
-	iir = I915_READ(IIR);
-
-	CTR1(KTR_DRM, "driver_irq_handler %x", iir);
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
-	else
-		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-
-	for (;;) {
-		irq_received = iir != 0;
-
-		/* Can't rely on pipestat interrupt bit in iir as it might
-		 * have been cleared after the pipestat interrupt was received.
-		 * It doesn't set the bit in iir again, but it still produces
-		 * interrupts (for non-MSI).
-		 */
-		mtx_lock(&dev_priv->irq_lock);
-		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			i915_handle_error(dev, false);
-
-		for_each_pipe(pipe) {
-			int reg = PIPESTAT(pipe);
-			pipe_stats[pipe] = I915_READ(reg);
-
-			/*
-			 * Clear the PIPE*STAT regs before the IIR
-			 */
-			if (pipe_stats[pipe] & 0x8000ffff) {
-				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-					DRM_DEBUG("pipe %c underrun\n",
-							 pipe_name(pipe));
-				I915_WRITE(reg, pipe_stats[pipe]);
-				irq_received = 1;
-			}
-		}
-		mtx_unlock(&dev_priv->irq_lock);
-
-		if (!irq_received)
-			break;
-
-		/* Consume port.  Then clear IIR or we'll miss events */
-		if ((I915_HAS_HOTPLUG(dev)) &&
-		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
-			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
-			DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
-				  hotplug_status);
-			if (hotplug_status & dev_priv->hotplug_supported_mask)
-				taskqueue_enqueue(dev_priv->tq,
-				    &dev_priv->hotplug_task);
-
-			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-			I915_READ(PORT_HOTPLUG_STAT);
-		}
-
-		I915_WRITE(IIR, iir);
-		new_iir = I915_READ(IIR); /* Flush posted writes */
-
-#if 0
-		if (dev->primary->master) {
-			master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->last_dispatch =
-					READ_BREADCRUMB(dev_priv);
-		}
-#else
-		if (dev_priv->sarea_priv)
-			dev_priv->sarea_priv->last_dispatch =
-			    READ_BREADCRUMB(dev_priv);
-#endif
-
-		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->rings[RCS]);
-		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->rings[VCS]);
-
-		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
-			intel_prepare_page_flip(dev, 0);
-			if (dev_priv->flip_pending_is_done)
-				intel_finish_page_flip_plane(dev, 0);
-		}
-
-		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
-			intel_prepare_page_flip(dev, 1);
-			if (dev_priv->flip_pending_is_done)
-				intel_finish_page_flip_plane(dev, 1);
-		}
-
-		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & vblank_status &&
-			    drm_handle_vblank(dev, pipe)) {
-				vblank++;
-				if (!dev_priv->flip_pending_is_done) {
-					i915_pageflip_stall_check(dev, pipe);
-					intel_finish_page_flip(dev, pipe);
-				}
-			}
-
-			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
-				blc_event = true;
-		}
-
-
-		if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
-#if 1
-			KIB_NOTYET();
-#else
-			intel_opregion_asle_intr(dev);
-#endif
-		}
-
-		/* With MSI, interrupts are only generated when iir
-		 * transitions from zero to nonzero.  If another bit got
-		 * set while we were handling the existing iir bits, then
-		 * we would never get another interrupt.
-		 *
-		 * This is fine on non-MSI as well, as if we hit this path
-		 * we avoid exiting the interrupt handler only to generate
-		 * another one.
-		 *
-		 * Note that for MSI this could cause a stray interrupt report
-		 * if an interrupt landed in the time between writing IIR and
-		 * the posting read.  This should be rare enough to never
-		 * trigger the 99% of 100,000 interrupts test for disabling
-		 * stray interrupts.
-		 */
-		iir = new_iir;
-	}
-}
-
-static int i915_emit_irq(struct drm_device * dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-#if 0
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-#endif
-
-	i915_kernel_lost_context(dev);
-
-	DRM_DEBUG("i915: emit_irq\n");
-
-	dev_priv->counter++;
-	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->counter = 1;
-#if 0
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-#else
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
-#endif
-
-	if (BEGIN_LP_RING(4) == 0) {
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->counter);
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-
-	return dev_priv->counter;
-}
-
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-#if 0
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-#endif
-	int ret;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
-	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
-		  READ_BREADCRUMB(dev_priv));
-
-#if 0
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-		return 0;
-	}
-
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-#else
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-		if (dev_priv->sarea_priv) {
-			dev_priv->sarea_priv->last_dispatch =
-				READ_BREADCRUMB(dev_priv);
-		}
-		return 0;
-	}
-
-	if (dev_priv->sarea_priv)
-		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-#endif
-
-	ret = 0;
-	mtx_lock(&ring->irq_lock);
-	if (ring->irq_get(ring)) {
-		DRM_UNLOCK(dev);
-		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
-			ret = -msleep(ring, &ring->irq_lock, PCATCH,
-			    "915wtq", 3 * hz);
-		}
-		ring->irq_put(ring);
-		mtx_unlock(&ring->irq_lock);
-		DRM_LOCK(dev);
-	} else {
-		mtx_unlock(&ring->irq_lock);
-		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
-		     3000, 1, "915wir"))
-			ret = -EBUSY;
-	}
-
-	if (ret == -EBUSY) {
-		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
-	}
-
-	return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_irq_emit_t *emit = data;
-	int result;
-
-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	DRM_LOCK(dev);
-	result = i915_emit_irq(dev);
-	DRM_UNLOCK(dev);
-
-	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
-		DRM_ERROR("copy_to_user\n");
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_irq_wait_t *irqwait = data;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -1239,7 +977,7 @@
 
 	/* maintain vblank delivery even in deep C-states */
 	if (dev_priv->info->gen == 3)
-		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
 	mtx_unlock(&dev_priv->irq_lock);
 	CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
 
@@ -1272,8 +1010,8 @@
 		return -EINVAL;
 
 	mtx_lock(&dev_priv->irq_lock);
-	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-				    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+	ironlake_enable_display_irq(dev_priv,
+				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
 	mtx_unlock(&dev_priv->irq_lock);
 	CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
 
@@ -1280,7 +1018,31 @@
 	return 0;
 }
 
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 dpfl, imr;
 
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	mtx_lock(&dev_priv->irq_lock);
+	dpfl = I915_READ(VLV_DPFLIPSTAT);
+	imr = I915_READ(VLV_IMR);
+	if (pipe == 0) {
+		dpfl |= PIPEA_VBLANK_INT_EN;
+		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	} else {
+		dpfl |= PIPEA_VBLANK_INT_EN;
+		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+	}
+	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+	I915_WRITE(VLV_IMR, imr);
+	mtx_unlock(&dev_priv->irq_lock);
+
+	return 0;
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -1291,8 +1053,7 @@
 
 	mtx_lock(&dev_priv->irq_lock);
 	if (dev_priv->info->gen == 3)
-		I915_WRITE(INSTPM,
-			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 
 	i915_disable_pipestat(dev_priv, pipe,
 	    PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1319,66 +1080,32 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
 	mtx_lock(&dev_priv->irq_lock);
-	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-				     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+	ironlake_disable_display_irq(dev_priv,
+				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
 	mtx_unlock(&dev_priv->irq_lock);
 	CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
 }
 
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 dpfl, imr;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
+	mtx_lock(&dev_priv->irq_lock);
+	dpfl = I915_READ(VLV_DPFLIPSTAT);
+	imr = I915_READ(VLV_IMR);
+	if (pipe == 0) {
+		dpfl &= ~PIPEA_VBLANK_INT_EN;
+		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	} else {
+		dpfl &= ~PIPEB_VBLANK_INT_EN;
+		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 	}
-
-	return 0;
+	I915_WRITE(VLV_IMR, imr);
+	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+	mtx_unlock(&dev_priv->irq_lock);
 }
 
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_vblank_pipe_t *pipe = data;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
-	return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	/* The delayed swap mechanism was fundamentally racy, and has been
-	 * removed.  The model was that the client requested a delayed flip/swap
-	 * from the kernel, then waited for vblank before continuing to perform
-	 * rendering.  The problem was that the kernel might wake the client
-	 * up before it dispatched the vblank swap (since the lock has to be
-	 * held while touching the ringbuffer), in which case the client would
-	 * clear and start the next frame before the swap occurred, and
-	 * flicker would occur in addition to likely missing the vblank.
-	 *
-	 * In the absence of this ioctl, userland falls back to a correct path
-	 * of waiting for a vblank, then dispatching the swap on its own.
-	 * Context switching to userland and back is plenty fast enough for
-	 * meeting the requirements of vblank swapping.
-	 */
-	return -EINVAL;
-}
-
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
@@ -1395,15 +1122,15 @@
 	if (list_empty(&ring->request_list) ||
 	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
-		if (ring->waiting_seqno) {
-			DRM_ERROR(
-"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
-				  ring->name,
-				  ring->waiting_seqno,
-				  ring->get_seqno(ring));
+		sleepq_lock(ring);
+		if (sleepq_sleepcnt(ring, 0) != 0) {
+			sleepq_release(ring);
+			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+				  ring->name);
 			wakeup(ring);
 			*err = true;
-		}
+		} else
+			sleepq_release(ring);
 		return true;
 	}
 	return false;
@@ -1423,6 +1150,35 @@
 	return false;
 }
 
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->hangcheck_count++ > 1) {
+		bool hung = true;
+
+		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+		i915_handle_error(dev, true);
+
+		if (!IS_GEN2(dev)) {
+			struct intel_ring_buffer *ring;
+			int i;
+
+			/* Is the chip hanging on a WAIT_FOR_EVENT?
+			 * If so we can simply poke the RB_WAIT bit
+			 * and break the hang. This should work on
+			 * all but the second generation chipsets.
+			 */
+			for_each_ring(ring, dev_priv, i)
+				hung &= !kick_ring(ring);
+		}
+
+		return hung;
+	}
+
+	return false;
+}
+
 /**
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. The first time this is called we simply record
@@ -1434,19 +1190,31 @@
 {
 	struct drm_device *dev = (struct drm_device *)context;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
-	bool err = false;
+	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+	struct intel_ring_buffer *ring;
+	bool err = false, idle;
+	int i;
 
 	if (!i915_enable_hangcheck)
 		return;
 
+	memset(acthd, 0, sizeof(acthd));
+	idle = true;
+	for_each_ring(ring, dev_priv, i) {
+	    idle &= i915_hangcheck_ring_idle(ring, &err);
+	    acthd[i] = intel_ring_get_active_head(ring);
+	}
+
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) {
+	if (idle) {
+		if (err) {
+			if (i915_hangcheck_hung(dev))
+				return;
+
+			goto repeat;
+		}
+
 		dev_priv->hangcheck_count = 0;
-		if (err)
-			goto repeat;
 		return;
 	}
 
@@ -1457,47 +1225,15 @@
 		instdone = I915_READ(INSTDONE_I965);
 		instdone1 = I915_READ(INSTDONE1);
 	}
-	acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]);
-	acthd_bsd = HAS_BSD(dev) ?
-		intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0;
-	acthd_blt = HAS_BLT(dev) ?
-		intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0;
-
-	if (dev_priv->last_acthd == acthd &&
-	    dev_priv->last_acthd_bsd == acthd_bsd &&
-	    dev_priv->last_acthd_blt == acthd_blt &&
+	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
-		if (dev_priv->hangcheck_count++ > 1) {
-			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-			i915_handle_error(dev, true);
-
-			if (!IS_GEN2(dev)) {
-				/* Is the chip hanging on a WAIT_FOR_EVENT?
-				 * If so we can simply poke the RB_WAIT bit
-				 * and break the hang. This should work on
-				 * all but the second generation chipsets.
-				 */
-				if (kick_ring(&dev_priv->rings[RCS]))
-					goto repeat;
-
-				if (HAS_BSD(dev) &&
-				    kick_ring(&dev_priv->rings[VCS]))
-					goto repeat;
-
-				if (HAS_BLT(dev) &&
-				    kick_ring(&dev_priv->rings[BCS]))
-					goto repeat;
-			}
-
+		if (i915_hangcheck_hung(dev))
 			return;
-		}
 	} else {
 		dev_priv->hangcheck_count = 0;
 
-		dev_priv->last_acthd = acthd;
-		dev_priv->last_acthd_bsd = acthd_bsd;
-		dev_priv->last_acthd_blt = acthd_blt;
+		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
 		dev_priv->last_instdone = instdone;
 		dev_priv->last_instdone1 = instdone1;
 	}
@@ -1516,13 +1252,6 @@
 
 	atomic_set(&dev_priv->irq_received, 0);
 
-	TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
-	    dev->dev_private);
-	TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
-	    dev->dev_private);
-	TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
-	    dev->dev_private);
-
 	I915_WRITE(HWSTAM, 0xeffe);
 
 	/* XXX hotplug from PCH */
@@ -1542,6 +1271,38 @@
 	POSTING_READ(SDEIER);
 }
 
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	/* VLV magic */
+	I915_WRITE(VLV_IMR, 0);
+	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+	/* and GT */
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	I915_WRITE(DPINVGTT, 0xff);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
 /*
  * Enable digital hotplug on the PCH, and configure the DP short pulse
  * duration to 2ms (which is the minimum in the Display Port spec)
@@ -1571,7 +1332,6 @@
 	u32 render_irqs;
 	u32 hotplug_mask;
 
-	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 	dev_priv->irq_mask = ~display_mask;
 
 	/* should always can generate irq */
@@ -1588,8 +1348,8 @@
 	if (IS_GEN6(dev))
 		render_irqs =
 			GT_USER_INTERRUPT |
-			GT_GEN6_BSD_USER_INTERRUPT |
-			GT_BLT_USER_INTERRUPT;
+			GEN6_BSD_USER_INTERRUPT |
+			GEN6_BLITTER_USER_INTERRUPT;
 	else
 		render_irqs =
 			GT_USER_INTERRUPT |
@@ -1635,20 +1395,24 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
-		DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
-		DE_PLANEB_FLIP_DONE_IVB;
+	u32 display_mask =
+		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+		DE_PLANEC_FLIP_DONE_IVB |
+		DE_PLANEB_FLIP_DONE_IVB |
+		DE_PLANEA_FLIP_DONE_IVB;
 	u32 render_irqs;
 	u32 hotplug_mask;
 
-	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 	dev_priv->irq_mask = ~display_mask;
 
 	/* should always be able to generate an irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
 	I915_WRITE(DEIMR, dev_priv->irq_mask);
-	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
-		   DE_PIPEB_VBLANK_IVB);
+	I915_WRITE(DEIER,
+		   display_mask |
+		   DE_PIPEC_VBLANK_IVB |
+		   DE_PIPEB_VBLANK_IVB |
+		   DE_PIPEA_VBLANK_IVB);
 	POSTING_READ(DEIER);
 
 	dev_priv->gt_irq_mask = ~0;
@@ -1656,8 +1420,8 @@
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-	render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
-		GT_BLT_USER_INTERRUPT;
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+		GEN6_BLITTER_USER_INTERRUPT;
 	I915_WRITE(GTIER, render_irqs);
 	POSTING_READ(GTIER);
 
@@ -1677,26 +1441,500 @@
 	return 0;
 }
 
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 render_irqs;
+	u32 enable_mask;
+	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+	u16 msid;
+
+	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	dev_priv->irq_mask = ~enable_mask;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	/* Hack for broken MSIs on VLV */
+	pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
+	msid = pci_read_config(dev->dev, 0x98, 2);
+	msid &= 0xff; /* mask out delivery bits */
+	msid |= (1<<14);
+	pci_write_config(dev->dev, 0x98, msid, 2);
+
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	I915_WRITE(VLV_IER, enable_mask);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(PIPESTAT(0), 0xffff);
+	I915_WRITE(PIPESTAT(1), 0xffff);
+	POSTING_READ(VLV_IER);
+
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+
+	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+		GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+		GT_GEN6_BLT_USER_INTERRUPT |
+		GT_GEN6_BSD_USER_INTERRUPT |
+		GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+		GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+		GT_PIPE_NOTIFY |
+		GT_RENDER_CS_ERROR_INTERRUPT |
+		GT_SYNC_STATUS |
+		GT_USER_INTERRUPT;
+
+	dev_priv->gt_irq_mask = ~render_irqs;
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, 0);
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	/* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
+	/* Note HDMI and DP share bits */
+	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMID_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+		hotplug_en |= CRT_HOTPLUG_INT_EN;
+		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+	}
+#endif
+
+	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+	return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
 static void
-i915_driver_irq_preinstall(struct drm_device * dev)
+ironlake_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (dev_priv == NULL)
+		return;
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
+ 
+	atomic_set(&dev_priv->irq_received, 0);
+ 
+ 	for_each_pipe(pipe)
+ 		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	POSTING_READ16(IER);
+}
 
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	I915_WRITE16(EMR,
+		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+
+	I915_WRITE16(IER,
+		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+		     I915_USER_INTERRUPT);
+	POSTING_READ16(IER);
+
+	return 0;
+}
+
+static void i8xx_irq_handler(void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 iir, new_iir;
+	u32 pipe_stats[2];
+	int irq_received;
+	int pipe;
+	u16 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ16(IIR);
+	if (iir == 0)
+		return;
+
+	while (iir & ~flip_mask) {
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		mtx_unlock(&dev_priv->irq_lock);
+
+		I915_WRITE16(IIR, iir & ~flip_mask);
+		new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+		i915_update_dri1_breadcrumb(dev);
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+
+		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    drm_handle_vblank(dev, 0)) {
+			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+				intel_prepare_page_flip(dev, 0);
+				intel_finish_page_flip(dev, 0);
+				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+			}
+		}
+
+		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    drm_handle_vblank(dev, 1)) {
+			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+				intel_prepare_page_flip(dev, 1);
+				intel_finish_page_flip(dev, 1);
+				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+			}
+		}
+
+		iir = new_iir;
+	}
+}
+
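For reference, the "can't rely on pipestat" comment in the handler above encodes an ordering rule used by every handler in this patch: PIPESTAT is acked before IIR. A minimal sketch of the invariant, using the driver's own accessors (locking omitted; pipe, stat and iir stand for the handler's locals):

	/*
	 * PIPESTAT status bits are write-1-to-clear and do not re-assert
	 * their IIR summary bit once it has latched.  Acking IIR first
	 * could therefore drop the pipe event (non-MSI setups keep
	 * interrupting, but MSI is edge-like and would go quiet).
	 */
	u32 stat = I915_READ(PIPESTAT(pipe));
	I915_WRITE(PIPESTAT(pipe), stat);	/* 1: clear pipe status */
	I915_WRITE(IIR, iir);			/* 2: only now ack IIR  */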
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
 	atomic_set(&dev_priv->irq_received, 0);
 
-	TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
-	    dev->dev_private);
-	TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
-	    dev->dev_private);
-	TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
-	    dev->dev_private);
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
 
+	I915_WRITE16(HWSTAM, 0xeffe);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_ASLE_INTERRUPT |
+		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask =
+		I915_ASLE_INTERRUPT |
+		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+		I915_USER_INTERRUPT;
+
 	if (I915_HAS_HOTPLUG(dev)) {
+		/* Enable in IER... */
+		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+		/* and unmask in IMR */
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+	}
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMID_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+			hotplug_en |= CRT_HOTPLUG_INT_EN;
+			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+		}
+
+		/* Ignore TV since it's buggy */
+
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+	}
+
+	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
+
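The "Enable in IER... and unmask in IMR" pairing above reflects the two gates an event passes through on this hardware; both must be open for the CPU to see it. The model below is illustrative only, not the exact pipeline, and the variables are not real register reads:

	u32 latched  = isr & ~imr;	/* IMR gates what latches into IIR  */
	u32 asserted = latched & ier;	/* IER gates the interrupt line     */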
+static void i915_irq_handler(void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+	u32 flip[2] = {
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+	};
+	int pipe;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ(IIR);
+	do {
+		bool irq_received = (iir & ~flip_mask) != 0;
+		bool blc_event = false;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = true;
+			}
+		}
+		mtx_unlock(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if ((I915_HAS_HOTPLUG(dev)) &&
+		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+				  hotplug_status);
+			if (hotplug_status & dev_priv->hotplug_supported_mask)
+				taskqueue_enqueue(dev_priv->tq,
+				    &dev_priv->hotplug_task);
+
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			POSTING_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+
+		for_each_pipe(pipe) {
+			int plane = pipe;
+			if (IS_MOBILE(dev))
+				plane = !plane;
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    drm_handle_vblank(dev, pipe)) {
+				if (iir & flip[plane]) {
+					intel_prepare_page_flip(dev, plane);
+					intel_finish_page_flip(dev, pipe);
+					flip_mask &= ~flip[plane];
+				}
+			}
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	} while (iir & ~flip_mask);
+
+	i915_update_dri1_breadcrumb(dev);
+}
+
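The long MSI comment inside the handler above justifies its loop shape. Stripped to the control flow (helper names hypothetical), the pattern is: ack what you saw, re-read to catch late arrivals, process the snapshot, repeat until quiet:

	u32 iir = read_iir();
	while (iir /* & ~flip_mask */) {
		write_iir(iir);			/* ack the bits we saw       */
		u32 new_iir = read_iir();	/* flush posted write; pick  */
						/* up bits that raced the ack */
		process(iir);			/* handle the snapshot       */
		iir = new_iir;			/* loop until IIR goes quiet */
	}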
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 	}
 
+	I915_WRITE16(HWSTAM, 0xffff);
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
 	I915_WRITE(HWSTAM, 0xeffe);
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0);
@@ -1705,22 +1943,26 @@
 	POSTING_READ(IER);
 }
 
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
-static int
-i915_driver_irq_postinstall(struct drm_device *dev)
+static int i965_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+	u32 enable_mask;
 	u32 error_mask;
 
-	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
 	/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
+	enable_mask = ~dev_priv->irq_mask;
+	enable_mask |= I915_USER_INTERRUPT;
+
+	if (IS_G4X(dev))
+		enable_mask |= I915_BSD_USER_INTERRUPT;
+
 	dev_priv->pipestat[0] = 0;
 	dev_priv->pipestat[1] = 0;
 
@@ -1781,54 +2023,128 @@
 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 	}
 
-#if 1
-	KIB_NOTYET();
-#else
 	intel_opregion_enable_asle(dev);
-#endif
 
 	return 0;
 }
 
-static void
-ironlake_irq_uninstall(struct drm_device *dev)
+static void i965_irq_handler(void *arg)
 {
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 iir, new_iir;
+	u32 pipe_stats[I915_MAX_PIPES];
+	int irq_received;
+	int pipe;
 
-	if (dev_priv == NULL)
-		return;
+	atomic_inc(&dev_priv->irq_received);
 
-	dev_priv->vblank_pipe = 0;
+	iir = I915_READ(IIR);
 
-	I915_WRITE(HWSTAM, 0xffffffff);
+	for (;;) {
+		bool blc_event = false;
 
-	I915_WRITE(DEIMR, 0xffffffff);
-	I915_WRITE(DEIER, 0x0);
-	I915_WRITE(DEIIR, I915_READ(DEIIR));
+		irq_received = iir != 0;
 
-	I915_WRITE(GTIMR, 0xffffffff);
-	I915_WRITE(GTIER, 0x0);
-	I915_WRITE(GTIIR, I915_READ(GTIIR));
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
 
-	I915_WRITE(SDEIMR, 0xffffffff);
-	I915_WRITE(SDEIER, 0x0);
-	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
 
-	taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
-	taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
-	taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		mtx_unlock(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if ((I915_HAS_HOTPLUG(dev)) &&
+		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+				  hotplug_status);
+			if (hotplug_status & dev_priv->hotplug_supported_mask)
+				taskqueue_enqueue(dev_priv->tq,
+				    &dev_priv->hotplug_task);
+
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			I915_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[VCS]);
+
+		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 0);
+
+		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 1);
+
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+			    drm_handle_vblank(dev, pipe)) {
+				i915_pageflip_stall_check(dev, pipe);
+				intel_finish_page_flip(dev, pipe);
+			}
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	}
+
+	i915_update_dri1_breadcrumb(dev);
 }
 
-static void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i965_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
-	if (!dev_priv)
-		return;
-
-	dev_priv->vblank_pipe = 0;
-
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -1844,23 +2160,25 @@
 		I915_WRITE(PIPESTAT(pipe),
 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
 	I915_WRITE(IIR, I915_READ(IIR));
-
-	taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
-	taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
-	taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
 }
 
-void
-intel_irq_init(struct drm_device *dev)
+void intel_irq_init(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
+	    dev->dev_private);
+	TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
+	    dev->dev_private);
+	TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
+	    dev->dev_private);
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
 	else
@@ -1867,7 +2185,14 @@
 		dev->driver->get_vblank_timestamp = NULL;
 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_VALLEYVIEW(dev)) {
+		dev->driver->irq_handler = valleyview_irq_handler;
+		dev->driver->irq_preinstall = valleyview_irq_preinstall;
+		dev->driver->irq_postinstall = valleyview_irq_postinstall;
+		dev->driver->irq_uninstall = valleyview_irq_uninstall;
+		dev->driver->enable_vblank = valleyview_enable_vblank;
+		dev->driver->disable_vblank = valleyview_disable_vblank;
+	} else if (IS_IVYBRIDGE(dev)) {
 		/* Share pre & uninstall handlers with ILK/SNB */
 		dev->driver->irq_handler = ivybridge_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -1875,6 +2200,14 @@
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
 		dev->driver->enable_vblank = ivybridge_enable_vblank;
 		dev->driver->disable_vblank = ivybridge_disable_vblank;
+	} else if (IS_HASWELL(dev)) {
+		/* Share interrupts handling with IVB */
+		dev->driver->irq_handler = ivybridge_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -1883,10 +2216,25 @@
 		dev->driver->enable_vblank = ironlake_enable_vblank;
 		dev->driver->disable_vblank = ironlake_disable_vblank;
 	} else {
-		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
-		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
-		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
-		dev->driver->irq_handler = i915_driver_irq_handler;
+		if (INTEL_INFO(dev)->gen == 2) {
+			dev->driver->irq_preinstall = i8xx_irq_preinstall;
+			dev->driver->irq_postinstall = i8xx_irq_postinstall;
+			dev->driver->irq_handler = i8xx_irq_handler;
+			dev->driver->irq_uninstall = i8xx_irq_uninstall;
+		} else if (INTEL_INFO(dev)->gen == 3) {
+			/* IIR "flip pending" means done if this bit is set */
+			I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+			dev->driver->irq_preinstall = i915_irq_preinstall;
+			dev->driver->irq_postinstall = i915_irq_postinstall;
+			dev->driver->irq_uninstall = i915_irq_uninstall;
+			dev->driver->irq_handler = i915_irq_handler;
+		} else {
+			dev->driver->irq_preinstall = i965_irq_preinstall;
+			dev->driver->irq_postinstall = i965_irq_postinstall;
+			dev->driver->irq_uninstall = i965_irq_uninstall;
+			dev->driver->irq_handler = i965_irq_handler;
+		}
 		dev->driver->enable_vblank = i915_enable_vblank;
 		dev->driver->disable_vblank = i915_disable_vblank;
 	}
@@ -1918,7 +2266,8 @@
 		if (d == NULL)
 			goto unwind;
 
-		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+		    src->has_global_gtt_mapping) {
 			/* Simply ignore tiling or any overlapping fence.
 			 * It's part of the error state, and this hopefully
 			 * captures what the GPU read.
@@ -1976,13 +2325,12 @@
 	free(obj, DRM_I915_GEM);
 }
 
-static void
-i915_error_state_free(struct drm_device *dev,
-		      struct drm_i915_error_state *error)
+void
+i915_error_state_free(struct drm_i915_error_state *error)
 {
 	int i;
 
-	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		i915_error_object_free(error->ring[i].batchbuffer);
 		i915_error_object_free(error->ring[i].ringbuffer);
 		free(error->ring[i].requests, DRM_I915_GEM);
@@ -1993,39 +2341,59 @@
 	free(error, DRM_I915_GEM);
 }
 
-static u32
-capture_bo_list(struct drm_i915_error_buffer *err, int count,
-    struct list_head *head)
+static void capture_bo(struct drm_i915_error_buffer *err,
+		       struct drm_i915_gem_object *obj)
 {
+	err->size = obj->base.size;
+	err->name = obj->base.name;
+	err->seqno = obj->last_rendering_seqno;
+	err->gtt_offset = obj->gtt_offset;
+	err->read_domains = obj->base.read_domains;
+	err->write_domain = obj->base.write_domain;
+	err->fence_reg = obj->fence_reg;
+	err->pinned = 0;
+	if (obj->pin_count > 0)
+		err->pinned = 1;
+	if (obj->user_pin_count > 0)
+		err->pinned = -1;
+	err->tiling = obj->tiling_mode;
+	err->dirty = obj->dirty;
+	err->purgeable = obj->madv != I915_MADV_WILLNEED;
+	err->ring = obj->ring ? obj->ring->id : -1;
+	err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head)
+{
 	struct drm_i915_gem_object *obj;
 	int i = 0;
 
 	list_for_each_entry(obj, head, mm_list) {
-		err->size = obj->base.size;
-		err->name = obj->base.name;
-		err->seqno = obj->last_rendering_seqno;
-		err->gtt_offset = obj->gtt_offset;
-		err->read_domains = obj->base.read_domains;
-		err->write_domain = obj->base.write_domain;
-		err->fence_reg = obj->fence_reg;
-		err->pinned = 0;
-		if (obj->pin_count > 0)
-			err->pinned = 1;
-		if (obj->user_pin_count > 0)
-			err->pinned = -1;
-		err->tiling = obj->tiling_mode;
-		err->dirty = obj->dirty;
-		err->purgeable = obj->madv != I915_MADV_WILLNEED;
-		err->ring = obj->ring ? obj->ring->id : -1;
-		err->cache_level = obj->cache_level;
+		capture_bo(err++, obj);
+		if (++i == count)
+			break;
+	}
 
+	return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head)
+{
+	struct drm_i915_gem_object *obj;
+	int i = 0;
+
+	list_for_each_entry(obj, head, gtt_list) {
+		if (obj->pin_count == 0)
+			continue;
+
+		capture_bo(err++, obj);
 		if (++i == count)
 			break;
-
-		err++;
 	}
 
-	return (i);
+	return i;
 }
 
 static void
@@ -2099,7 +2467,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
 		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
 		error->semaphore_mboxes[ring->id][0]
 			= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -2108,6 +2475,7 @@
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
+		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
 		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -2117,11 +2485,15 @@
 			error->bbaddr = I915_READ64(BB_ADDR);
 		}
 	} else {
+		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
 		error->ipehr[ring->id] = I915_READ(IPEHR);
 		error->instdone[ring->id] = I915_READ(INSTDONE);
 	}
 
+	sleepq_lock(ring);
+	error->waiting[ring->id] = sleepq_sleepcnt(ring, 0) != 0;
+	sleepq_release(ring);
 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
 	error->seqno[ring->id] = ring->get_seqno(ring);
 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -2137,15 +2509,11 @@
     struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *request;
 	int i, count;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_ring_buffer *ring = &dev_priv->rings[i];
-
-		if (ring->obj == NULL)
-			continue;
-
+	for_each_ring(ring, dev_priv, i) {
 		i915_record_ring_state(dev, error, ring);
 
 		error->ring[i].batchbuffer =
@@ -2203,8 +2571,19 @@
 	DRM_INFO("capturing error event; look for more information in "
 	    "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
 
+	refcount_init(&error->ref, 1);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
+
+	if (HAS_PCH_SPLIT(dev))
+		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+	else if (IS_VALLEYVIEW(dev))
+		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+	else if (IS_GEN2(dev))
+		error->ier = I915_READ16(IER);
+	else
+		error->ier = I915_READ(IER);
+
 	for_each_pipe(pipe)
 		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
@@ -2224,8 +2603,9 @@
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
-	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		i++;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, mm_list)
+		if (obj->pin_count)
+			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
 
 	error->active_bo = NULL;
@@ -2239,12 +2619,16 @@
 	}
 
 	if (error->active_bo)
-		error->active_bo_count = capture_bo_list(error->active_bo,
-		    error->active_bo_count, &dev_priv->mm.active_list);
+		error->active_bo_count =
+			capture_active_bo(error->active_bo,
+					  error->active_bo_count,
+					  &dev_priv->mm.active_list);
 
 	if (error->pinned_bo)
-		error->pinned_bo_count = capture_bo_list(error->pinned_bo,
-		    error->pinned_bo_count, &dev_priv->mm.pinned_list);
+		error->pinned_bo_count =
+			capture_pinned_bo(error->pinned_bo,
+					  error->pinned_bo_count,
+					  &dev_priv->mm.gtt_list);
 
 	microtime(&error->time);
 
@@ -2259,7 +2643,7 @@
 	mtx_unlock(&dev_priv->error_lock);
 
 	if (error != NULL)
-		i915_error_state_free(dev, error);
+		i915_error_state_free(error);
 }
 
 void
@@ -2273,6 +2657,6 @@
 	dev_priv->first_error = NULL;
 	mtx_unlock(&dev_priv->error_lock);
 
-	if (error != NULL)
-		i915_error_state_free(dev, error);
+	if (error != NULL && refcount_release(&error->ref))
+		i915_error_state_free(error);
 }
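On the new error->ref counting above: the destroy path now frees only on the last reference, presumably so that a reader dumping the state via the sysctl mentioned earlier can pin the snapshot first. A hedged sketch of that reader side, using FreeBSD's refcount(9) as the patch itself does:

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	if (error != NULL)
		refcount_acquire(&error->ref);	/* pin the snapshot */
	mtx_unlock(&dev_priv->error_lock);
	/* ... format/dump the snapshot without holding the lock ... */
	if (error != NULL && refcount_release(&error->ref))
		i915_error_state_free(error);	/* last ref frees   */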

Modified: trunk/sys/dev/drm2/i915/i915_reg.h
===================================================================
--- trunk/sys/dev/drm2/i915/i915_reg.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
@@ -23,7 +24,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_reg.h 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_reg.h 280369 2015-03-23 13:38:33Z kib $");
 
 #ifndef _I915_REG_H_
 #define _I915_REG_H_
@@ -30,6 +31,11 @@
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
 
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
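These helpers target registers whose high 16 bits are per-bit write enables: a write changes only the bits whose enable bit is also set, so no read-modify-write is needed. Expanded by hand for ECO_FLIP_DONE (bit 0), as used by this patch's I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)):

	_MASKED_BIT_ENABLE(ECO_FLIP_DONE)  /* == 0x00010001: touch bit 0, set it   */
	_MASKED_BIT_DISABLE(ECO_FLIP_DONE) /* == 0x00010000: touch bit 0, clear it */

Similarly, _PIPE(pipe, a, b) interpolates between the pipe A and pipe B register banks, so PIPESTAT(1) resolves to _PIPEBSTAT; the new _PORT() does the same for ports.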
+
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
@@ -80,6 +86,7 @@
 #define  GRDOM_FULL	(0<<2)
 #define  GRDOM_RENDER	(1<<2)
 #define  GRDOM_MEDIA	(3<<2)
+#define  GRDOM_RESET_ENABLE (1<<0)
 
 #define GEN6_MBCUNIT_SNPCR	0x900c /* for LLC config */
 #define   GEN6_MBC_SNPCR_SHIFT	21
@@ -128,6 +135,13 @@
 #define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
 
+#define GAC_ECO_BITS			0x14090
+#define   ECOBITS_PPGTT_CACHE64B	(3<<8)
+#define   ECOBITS_PPGTT_CACHE4B		(0<<8)
+
+#define GAB_CTL				0x24000
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -200,6 +214,10 @@
 #define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
 #define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
 #define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE			(1<<0)
+#define   MI_ARB_DISABLE		(0<<0)
+
 #define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT		(1<<8)
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
@@ -225,6 +243,7 @@
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
 #define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
@@ -304,7 +323,62 @@
 #define  DEBUG_RESET_RENDER		(1<<8)
 #define  DEBUG_RESET_DISPLAY		(1<<9)
 
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ *  0x800c: m1, m2, n, p1, p2, k dividers
+ *  0x8014: REF and SFR select
+ *  0x8014: N divider, VCO select
+ *  0x801c/3c: core clock bits
+ *  0x8048/68: low pass filter coefficients
+ *  0x8100: fast clock controls
+ */
+#define DPIO_PKT			0x2100
+#define  DPIO_RID			(0<<24)
+#define  DPIO_OP_WRITE			(1<<16)
+#define  DPIO_OP_READ			(0<<16)
+#define  DPIO_PORTID			(0x12<<8)
+#define  DPIO_BYTE			(0xf<<4)
+#define  DPIO_BUSY			(1<<0) /* status only */
+#define DPIO_DATA			0x2104
+#define DPIO_REG			0x2108
+#define DPIO_CTL			0x2110
+#define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
+#define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
+#define  DPIO_SFR_BYPASS		(1<<1)
+#define  DPIO_RESET			(1<<0)
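Together these registers form a mailbox: stage the target offset and payload, kick a packet carrying the opcode, port id and byte enables, then poll DPIO_BUSY. A hedged sketch of a write using only the bits defined above; the poll helper is hypothetical and the exact write order is an assumption:

	static int vlv_dpio_write_sketch(struct drm_i915_private *dev_priv,
	    u32 reg, u32 val)
	{
		I915_WRITE(DPIO_REG, reg);	/* target DPIO offset */
		I915_WRITE(DPIO_DATA, val);	/* payload            */
		I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE |
		    DPIO_PORTID | DPIO_BYTE);	/* kick transaction   */
		/* hypothetical helper: poll until DPIO_BUSY clears   */
		return wait_for_bit_clear(DPIO_PKT, DPIO_BUSY, 100);
	}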
 
+#define _DPIO_DIV_A			0x800c
+#define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
+#define   DPIO_K_SHIFT			(24) /* 4 bits */
+#define   DPIO_P1_SHIFT			(21) /* 3 bits */
+#define   DPIO_P2_SHIFT			(16) /* 5 bits */
+#define   DPIO_N_SHIFT			(12) /* 4 bits */
+#define   DPIO_ENABLE_CALIBRATION	(1<<11)
+#define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
+#define   DPIO_M2DIV_MASK		0xff
+#define _DPIO_DIV_B			0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A			0x8014
+#define   DPIO_REFSEL_OVERRIDE		27
+#define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
+#define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
+#define   DPIO_PLL_REFCLK_SEL_SHIFT	16 /* 2 bits */
+#define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
+#define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B			0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A		0x801c
+#define _DPIO_CORE_CLK_B		0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A		0x8048
+#define _DPIO_LFP_COEFF_B		0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE		0x8100
+
 /*
  * Fence registers
  */
@@ -363,8 +437,6 @@
 #define ARB_MODE		0x04030
 #define   ARB_MODE_SWIZZLE_SNB	(1<<4)
 #define   ARB_MODE_SWIZZLE_IVB	(1<<5)
-#define   ARB_MODE_ENABLE(x)	GFX_MODE_ENABLE(x)
-#define   ARB_MODE_DISABLE(x)	GFX_MODE_DISABLE(x)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
 #define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
 #define DONE_REG		0x40b0
@@ -420,6 +492,7 @@
 #define INSTDONE	0x02090
 #define NOPID		0x02094
 #define HWSTAM		0x02098
+#define DMA_FADD_I8XX	0x020d0
 
 #define ERROR_GEN6	0x040a0
 
@@ -435,6 +508,7 @@
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
 #define _3D_CHICKEN3	0x02090
+#define  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
@@ -450,14 +524,16 @@
 #define   GFX_PSMI_GRANULARITY		(1<<10)
 #define   GFX_PPGTT_ENABLE		(1<<9)
 
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
 #define IMR		0x020a8
 #define ISR		0x020ac
+#define VLV_IIR_RW	0x182084
+#define VLV_IER		0x1820a0
+#define VLV_IIR		0x1820a4
+#define VLV_IMR		0x1820a8
+#define VLV_ISR		0x1820ac
 #define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
 #define   I915_DISPLAY_PORT_INTERRUPT			(1<<17)
 #define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
@@ -503,7 +579,6 @@
 #define LM_BURST_LENGTH     0x00000700
 #define LM_FIFO_WATERMARK   0x0000001F
 #define MI_ARB_STATE	0x020e4 /* 915+ only */
-#define   MI_ARB_MASK_SHIFT	  16	/* shift for enable bits */
 
 /* Make render/texture TLB fetches lower priority than associated data
  *   fetches. This is not turned on by default
@@ -568,7 +643,6 @@
 #define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
 
 #define CACHE_MODE_0	0x02120 /* 915+ only */
-#define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
 #define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
@@ -582,7 +656,12 @@
 #define   ECO_GATING_CX_ONLY	(1<<3)
 #define   ECO_FLIP_DONE		(1<<0)
 
-/* GEN6 interrupt control */
+#define CACHE_MODE_1		0x7004 /* IVB+ */
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
 #define GEN6_RENDER_HWSTAM	0x2098
 #define GEN6_RENDER_IMR		0x20a8
 #define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
@@ -618,6 +697,21 @@
 
 #define GEN6_BSD_RNCID			0x12198
 
+#define GEN7_FF_THREAD_MODE		0x20a0
+#define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
+#define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
+#define   GEN7_FF_TS_SCHED_HW		(0x0<<16) /* Default */
+#define   GEN7_FF_VS_SCHED_HS1		(0x5<<12)
+#define   GEN7_FF_VS_SCHED_HS0		(0x3<<12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE	(0x1<<12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW		(0x0<<12)
+#define   GEN7_FF_DS_SCHED_HS1		(0x5<<4)
+#define   GEN7_FF_DS_SCHED_HS0		(0x3<<4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE	(0x1<<4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW		(0x0<<4)
+
 /*
  * Framebuffer compression (915+ only)
  */
@@ -746,9 +840,9 @@
 #define   GMBUS_PORT_PANEL	3
 #define   GMBUS_PORT_DPC	4 /* HDMIC */
 #define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
-				  /* 6 reserved */
-#define   GMBUS_PORT_DPD	7 /* HDMID */
-#define   GMBUS_NUM_PORTS       8
+#define   GMBUS_PORT_DPD	6 /* HDMID */
+#define   GMBUS_PORT_RESERVED	7 /* 7 reserved */
+#define   GMBUS_NUM_PORTS	(GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
 #define GMBUS1			0x5104 /* command/status */
 #define   GMBUS_SW_CLR_INT	(1<<31)
 #define   GMBUS_SW_RDY		(1<<30)
@@ -798,9 +892,11 @@
 #define _DPLL_A	0x06014
 #define _DPLL_B	0x06018
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
-#define   DPLL_VCO_ENABLE		(1 << 31)
+#define   DPLL_VCO_ENABLE		(1U << 31)
 #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
 #define   DPLL_VGA_MODE_DIS		(1 << 28)
 #define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
 #define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
@@ -812,6 +908,7 @@
 #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
+#define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
 
 #define SRX_INDEX		0x3c4
 #define SRX_DATA		0x3c5
@@ -907,6 +1004,7 @@
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
 #define _DPLL_B_MD 0x06020 /* 965+ only */
 #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
 #define _FPA0	0x06040
 #define _FPA1	0x06044
 #define _FPB0	0x06048
@@ -1047,6 +1145,9 @@
 #define RAMCLK_GATE_D		0x6210		/* CRL only */
 #define DEUC			0x6214          /* CRL only */
 
+#define FW_BLC_SELF_VLV		0x6500
+#define  FW_CSPWRDWNEN		(1<<15)
+
 /*
  * Palette regs
  */
@@ -1361,6 +1462,31 @@
  */
 #define CCID			0x2180
 #define   CCID_EN		(1<<0)
+#define CXT_SIZE		0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg)	((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg)	((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg)	((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg)	((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg)	((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg)	(GEN6_CXT_POWER_SIZE(cxt_reg) + \
+					GEN6_CXT_RING_SIZE(cxt_reg) + \
+					GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+					GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+					GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE		0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg)	((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg)	((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg)	(GEN7_CXT_POWER_SIZE(ctx_reg) + \
+					 GEN7_CXT_RING_SIZE(ctx_reg) + \
+					 GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
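Each extractor above pulls one small size field out of the context-size register, and the TOTAL macro simply sums them; the unit of the fields is hardware-defined (the consumer scales the sum), so treat this as a decoding sketch only:

	u32 reg   = I915_READ(CXT_SIZE);	/* gen6 variant            */
	u32 ring  = GEN6_CXT_RING_SIZE(reg);	/* bits 23:18              */
	u32 total = GEN6_CXT_TOTAL_SIZE(reg);	/* sum of the 6-bit fields */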
 /*
  * Overlay regs
  */
@@ -1483,7 +1609,7 @@
 /* SDVO port control */
 #define SDVOB			0x61140
 #define SDVOC			0x61160
-#define   SDVO_ENABLE		(1 << 31)
+#define   SDVO_ENABLE		(1U << 31)
 #define   SDVO_PIPE_B_SELECT	(1 << 30)
 #define   SDVO_STALL_SELECT	(1 << 29)
 #define   SDVO_INTERRUPT_ENABLE	(1 << 26)
@@ -1521,7 +1647,7 @@
 #define DVOA			0x61120
 #define DVOB			0x61140
 #define DVOC			0x61160
-#define   DVO_ENABLE			(1 << 31)
+#define   DVO_ENABLE			(1U << 31)
 #define   DVO_PIPE_B_SELECT		(1 << 30)
 #define   DVO_PIPE_STALL_UNUSED		(0 << 28)
 #define   DVO_PIPE_STALL		(1 << 28)
@@ -1557,7 +1683,7 @@
  * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
  * the DPLL semantics change when the LVDS is assigned to that pipe.
  */
-#define   LVDS_PORT_EN			(1 << 31)
+#define   LVDS_PORT_EN			(1U << 31)
 /* Selects pipe B for LVDS data.  Must be set on pre-965. */
 #define   LVDS_PIPEB_SELECT		(1 << 30)
 #define   LVDS_PIPE_MASK		(1 << 30)
@@ -1604,9 +1730,12 @@
 /* Video Data Island Packet control */
 #define VIDEO_DIP_DATA		0x61178
 #define VIDEO_DIP_CTL		0x61170
-#define   VIDEO_DIP_ENABLE		(1 << 31)
+/* Pre HSW: */
+#define   VIDEO_DIP_ENABLE		(1U << 31)
 #define   VIDEO_DIP_PORT_B		(1 << 29)
 #define   VIDEO_DIP_PORT_C		(2 << 29)
+#define   VIDEO_DIP_PORT_D		(3 << 29)
+#define   VIDEO_DIP_PORT_MASK		(3 << 29)
 #define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
 #define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
 #define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
@@ -1617,10 +1746,14 @@
 #define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
 #define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
 #define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
+#define   VIDEO_DIP_FREQ_MASK		(3 << 16)
+/* HSW and later: */
+#define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
+#define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
 
 /* Panel power sequencing */
 #define PP_STATUS	0x61200
-#define   PP_ON		(1 << 31)
+#define   PP_ON		(1U << 31)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -1653,7 +1786,7 @@
 
 /* Panel fitting */
 #define PFIT_CONTROL	0x61230
-#define   PFIT_ENABLE		(1 << 31)
+#define   PFIT_ENABLE		(1U << 31)
 #define   PFIT_PIPE_MASK	(3 << 29)
 #define   PFIT_PIPE_SHIFT	29
 #define   VERT_INTERP_DISABLE	(0 << 10)
@@ -1714,7 +1847,7 @@
 /* TV port control */
 #define TV_CTL			0x68000
 /** Enables the TV encoder */
-# define TV_ENC_ENABLE			(1 << 31)
+# define TV_ENC_ENABLE			(1U << 31)
 /** Sources the TV encoder input from pipe B instead of A. */
 # define TV_ENC_PIPEB_SELECT		(1 << 30)
 /** Outputs composite video (DAC A only) */
@@ -1786,7 +1919,7 @@
  *
  * This gets cleared when TV_DAC_STATE_EN is cleared
 */
-# define TVDAC_STATE_CHG		(1 << 31)
+# define TVDAC_STATE_CHG		(1U << 31)
 # define TVDAC_SENSE_MASK		(7 << 28)
 /** Reports that DAC A voltage is above the detect threshold */
 # define TVDAC_A_SENSE			(1 << 30)
@@ -1913,7 +2046,7 @@
 
 #define TV_H_CTL_2		0x68034
 /** Enables the colorburst (needed for non-component color) */
-# define TV_BURST_ENA			(1 << 31)
+# define TV_BURST_ENA			(1U << 31)
 /** Offset of the colorburst from the start of hsync, in pixels minus one. */
 # define TV_HBURST_START_SHIFT		16
 # define TV_HBURST_START_MASK		0x1fff0000
@@ -1958,7 +2091,7 @@
 
 #define TV_V_CTL_3		0x68044
 /** Enables generation of the equalization signal */
-# define TV_EQUAL_ENA			(1 << 31)
+# define TV_EQUAL_ENA			(1U << 31)
 /** Length of vsync, in half lines */
 # define TV_VEQ_LEN_MASK		0x007f0000
 # define TV_VEQ_LEN_SHIFT		16
@@ -2032,7 +2165,7 @@
 
 #define TV_SC_CTL_1		0x68060
 /** Turns on the first subcarrier phase generation DDA */
-# define TV_SC_DDA1_EN			(1 << 31)
+# define TV_SC_DDA1_EN			(1U << 31)
 /** Turns on the second subcarrier phase generation DDA */
 # define TV_SC_DDA2_EN			(1 << 30)
 /** Turns on the third subcarrier phase generation DDA */
@@ -2095,7 +2228,7 @@
  * If set, the rest of the registers are ignored, and the calculated values can
  * be read back from the register.
  */
-# define TV_AUTO_SCALE			(1 << 31)
+# define TV_AUTO_SCALE			(1U << 31)
 /**
  * Disables the vertical filter.
  *
@@ -2158,7 +2291,7 @@
 # define TV_VSCALE_IP_FRAC_SHIFT		0
 
 #define TV_CC_CONTROL		0x68090
-# define TV_CC_ENABLE			(1 << 31)
+# define TV_CC_ENABLE			(1U << 31)
 /**
  * Specifies which field to send the CC data in.
  *
@@ -2174,7 +2307,7 @@
 # define TV_CC_LINE_SHIFT		0
 
 #define TV_CC_DATA		0x68094
-# define TV_CC_RDY			(1 << 31)
+# define TV_CC_RDY			(1U << 31)
 /** Second word of CC data to be transmitted. */
 # define TV_CC_DATA_2_MASK		0x007f0000
 # define TV_CC_DATA_2_SHIFT		16
@@ -2197,7 +2330,7 @@
 #define DP_C				0x64200
 #define DP_D				0x64300
 
-#define   DP_PORT_EN			(1 << 31)
+#define   DP_PORT_EN			(1U << 31)
 #define   DP_PIPEB_SELECT		(1 << 30)
 #define   DP_PIPE_MASK			(1 << 30)
 
@@ -2307,7 +2440,7 @@
 #define DPD_AUX_CH_DATA4		0x64320
 #define DPD_AUX_CH_DATA5		0x64324
 
-#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1U << 31)
 #define   DP_AUX_CH_CTL_DONE		    (1 << 30)
 #define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
 #define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
@@ -2383,7 +2516,8 @@
 
 /* Pipe A */
 #define _PIPEADSL		0x70000
-#define   DSL_LINEMASK		0x00000fff
+#define   DSL_LINEMASK_GEN2	0x00000fff
+#define   DSL_LINEMASK_GEN3	0x00001fff
 #define _PIPEACONF		0x70008
 #define   PIPECONF_ENABLE	(1<<31)
 #define   PIPECONF_DISABLE	0
@@ -2425,13 +2559,16 @@
 #define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
 #define _PIPEASTAT		0x70024
 #define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define   SPRITE1_FLIPDONE_INT_EN_VLV		(1UL<<30)
 #define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
 #define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
 #define   PIPE_GMBUS_EVENT_ENABLE		(1UL<<27)
+#define   PLANE_FLIP_DONE_INT_EN_VLV		(1UL<<26)
 #define   PIPE_HOTPLUG_INTERRUPT_ENABLE		(1UL<<26)
 #define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
 #define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
 #define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV		(1UL<<26)
 #define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
 #define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
 #define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
@@ -2438,10 +2575,14 @@
 #define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE	(1UL<<18) /* pre-965 */
 #define   PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define   PIPEA_HBLANK_INT_EN_VLV		(1UL<<16)
 #define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define   SPRITE1_FLIPDONE_INT_STATUS_VLV	(1UL<<15)
+#define   SPRITE0_FLIPDONE_INT_STATUS_VLV	(1UL<<15)
 #define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
 #define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
 #define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define   PLANE_FLIPDONE_INT_STATUS_VLV		(1UL<<10)
 #define   PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL<<10)
 #define   PIPE_VSYNC_INTERRUPT_STATUS		(1UL<<9)
 #define   PIPE_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
@@ -2466,6 +2607,40 @@
 #define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
 #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
+#define VLV_DPFLIPSTAT				0x70028
+#define   PIPEB_LINE_COMPARE_STATUS		(1<<29)
+#define   PIPEB_HLINE_INT_EN			(1<<28)
+#define   PIPEB_VBLANK_INT_EN			(1<<27)
+#define   SPRITED_FLIPDONE_INT_EN		(1<<26)
+#define   SPRITEC_FLIPDONE_INT_EN		(1<<25)
+#define   PLANEB_FLIPDONE_INT_EN		(1<<24)
+#define   PIPEA_LINE_COMPARE_STATUS		(1<<21)
+#define   PIPEA_HLINE_INT_EN			(1<<20)
+#define   PIPEA_VBLANK_INT_EN			(1<<19)
+#define   SPRITEB_FLIPDONE_INT_EN		(1<<18)
+#define   SPRITEA_FLIPDONE_INT_EN		(1<<17)
+#define   PLANEA_FLIPDONE_INT_EN		(1<<16)
+
+#define DPINVGTT				0x7002c /* VLV only */
+#define   CURSORB_INVALID_GTT_INT_EN		(1<<23)
+#define   CURSORA_INVALID_GTT_INT_EN		(1<<22)
+#define   SPRITED_INVALID_GTT_INT_EN		(1<<21)
+#define   SPRITEC_INVALID_GTT_INT_EN		(1<<20)
+#define   PLANEB_INVALID_GTT_INT_EN		(1<<19)
+#define   SPRITEB_INVALID_GTT_INT_EN		(1<<18)
+#define   SPRITEA_INVALID_GTT_INT_EN		(1<<17)
+#define   PLANEA_INVALID_GTT_INT_EN		(1<<16)
+#define   DPINVGTT_EN_MASK			0xff0000
+#define   CURSORB_INVALID_GTT_STATUS		(1<<7)
+#define   CURSORA_INVALID_GTT_STATUS		(1<<6)
+#define   SPRITED_INVALID_GTT_STATUS		(1<<5)
+#define   SPRITEC_INVALID_GTT_STATUS		(1<<4)
+#define   PLANEB_INVALID_GTT_STATUS		(1<<3)
+#define   SPRITEB_INVALID_GTT_STATUS		(1<<2)
+#define   SPRITEA_INVALID_GTT_STATUS		(1<<1)
+#define   PLANEA_INVALID_GTT_STATUS		(1<<0)
+#define   DPINVGTT_STATUS_MASK			0xff
+
 #define DSPARB			0x70030
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
 #define   DSPARB_CSTART_SHIFT	7
@@ -2495,11 +2670,28 @@
 #define   DSPFW_HPLL_CURSOR_MASK	(0x3f<<16)
 #define   DSPFW_HPLL_SR_MASK		(0x1ff)
 
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32	32
+#define DRAIN_LATENCY_PRECISION_16	16
+#define VLV_DDL1			0x70050
+#define DDL_CURSORA_PRECISION_32	(1<<31)
+#define DDL_CURSORA_PRECISION_16	(0<<31)
+#define DDL_CURSORA_SHIFT		24
+#define DDL_PLANEA_PRECISION_32		(1<<7)
+#define DDL_PLANEA_PRECISION_16		(0<<7)
+#define VLV_DDL2			0x70054
+#define DDL_CURSORB_PRECISION_32	(1<<31)
+#define DDL_CURSORB_PRECISION_16	(0<<31)
+#define DDL_CURSORB_SHIFT		24
+#define DDL_PLANEB_PRECISION_32		(1<<7)
+#define DDL_PLANEB_PRECISION_16		(0<<7)
+
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
 #define I915_FIFO_LINE_SIZE	64
 #define I830_FIFO_LINE_SIZE	32
 
+#define VALLEYVIEW_FIFO_SIZE	255
 #define G4X_FIFO_SIZE		127
 #define I965_FIFO_SIZE		512
 #define I945_FIFO_SIZE		127
@@ -2507,6 +2699,7 @@
 #define I855GM_FIFO_SIZE	127 /* In cachelines */
 #define I830_FIFO_SIZE		95
 
+#define VALLEYVIEW_MAX_WM	0xff
 #define G4X_MAX_WM		0x3f
 #define I915_MAX_WM		0x3f
 
@@ -2521,6 +2714,7 @@
 #define PINEVIEW_CURSOR_DFT_WM	0
 #define PINEVIEW_CURSOR_GUARD_WM	5
 
+#define VALLEYVIEW_CURSOR_MAX_WM 64
 #define I965_CURSOR_FIFO	64
 #define I965_CURSOR_MAX_WM	32
 #define I965_CURSOR_DFT_WM	8
@@ -2729,6 +2923,13 @@
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK	(0xfffff000)
+#define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+		(I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+
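The LO/HI split lets the driver swap the page-aligned surface address while preserving whatever control bits live in the low 12 bits of the same register. Worked through with a made-up value:

	/* Suppose I915_READ(reg) == 0x12345abc (base 0x12345000, low 0xabc).
	 * I915_MODIFY_DISPBASE(reg, 0x20000000) then writes
	 *   0x20000000 | (0x12345abc & ~0xfffff000) == 0x20000abc,
	 * i.e. the new base with the old low bits intact. */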
 /* VBIOS flags */
 #define SWF00			0x71410
 #define SWF01			0x71414
@@ -2903,7 +3104,7 @@
 
 /* VBIOS regs */
 #define VGACNTRL		0x71400
-# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_DISP_DISABLE			(1U << 31)
 # define VGA_2X_MODE				(1 << 30)
 # define VGA_PIPE_B_SELECT			(1 << 29)
 
@@ -3029,7 +3230,7 @@
 #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
 
 /* interrupts */
-#define DE_MASTER_IRQ_CONTROL   (1 << 31)
+#define DE_MASTER_IRQ_CONTROL   (1U << 31)
 #define DE_SPRITEB_FLIP_DONE    (1 << 29)
 #define DE_SPRITEA_FLIP_DONE    (1 << 28)
 #define DE_PLANEB_FLIP_DONE     (1 << 27)
@@ -3061,26 +3262,38 @@
 #define DE_PCH_EVENT_IVB		(1<<28)
 #define DE_DP_A_HOTPLUG_IVB		(1<<27)
 #define DE_AUX_CHANNEL_A_IVB		(1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB	(1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB		(1<<13)
+#define DE_PIPEC_VBLANK_IVB		(1<<10)
 #define DE_SPRITEB_FLIP_DONE_IVB	(1<<9)
+#define DE_PLANEB_FLIP_DONE_IVB		(1<<8)
+#define DE_PIPEB_VBLANK_IVB		(1<<5)
 #define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
-#define DE_PLANEB_FLIP_DONE_IVB		(1<<8)
 #define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
-#define DE_PIPEB_VBLANK_IVB		(1<<5)
 #define DE_PIPEA_VBLANK_IVB		(1<<0)
 
+#define VLV_MASTER_IER			0x4400c /* Gunit master IER */
+#define   MASTER_INTERRUPT_ENABLE	(1U<<31)
+
 #define DEISR   0x44000
 #define DEIMR   0x44004
 #define DEIIR   0x44008
 #define DEIER   0x4400c
 
-/* GT interrupt */
-#define GT_PIPE_NOTIFY		(1 << 4)
-#define GT_RENDER_CS_ERROR	(1 << 3)
-#define GT_SYNC_STATUS          (1 << 2)
-#define GT_USER_INTERRUPT       (1 << 0)
-#define GT_BSD_USER_INTERRUPT   (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT	(1 << 12)
-#define GT_BLT_USER_INTERRUPT	(1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT	(1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT		(1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT		(1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT		(1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT		(1 << 12)
+#define GT_BSD_USER_INTERRUPT			(1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT	(1 << 5)
+#define GT_PIPE_NOTIFY				(1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT		(1 << 3)
+#define GT_SYNC_STATUS				(1 << 2)
+#define GT_USER_INTERRUPT			(1 << 0)
 
 #define GTISR   0x44010
 #define GTIMR   0x44014
@@ -3230,7 +3443,7 @@
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ?  _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0                0xc6040
 #define  FP_CB_TUNE		(0x3<<22)
@@ -3237,8 +3450,8 @@
 #define _PCH_FPA1                0xc6044
 #define _PCH_FPB0                0xc6048
 #define _PCH_FPB1                0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
@@ -3333,6 +3546,57 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
+#define VLV_VIDEO_DIP_CTL_A		0x60220
+#define VLV_VIDEO_DIP_DATA_A		0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	0x60210
+
+#define VLV_VIDEO_DIP_CTL_B		0x61170
+#define VLV_VIDEO_DIP_DATA_B		0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+	 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+	 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A		0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A	0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A		0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A	0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A	0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A	0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A		0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A		0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A		0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A		0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A		0x60344
+#define HSW_VIDEO_DIP_GCP_A		0x60210
+
+#define HSW_VIDEO_DIP_CTL_B		0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B	0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B		0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B	0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B	0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B	0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B		0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B		0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B		0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B		0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
+#define HSW_VIDEO_DIP_GCP_B		0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+	_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
 #define _TRANS_HTOTAL_B          0xe1000
 #define _TRANS_HBLANK_B          0xe1004
 #define _TRANS_HSYNC_B           0xe1008
@@ -3492,6 +3756,9 @@
 #define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
 #define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
 #define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
+/* LPT */
+#define  FDI_PORT_WIDTH_2X_LPT			(1<<19)
+#define  FDI_PORT_WIDTH_1X_LPT			(0<<19)
 
 #define _FDI_RXA_MISC            0xf0010
 #define _FDI_RXB_MISC            0xf1010
@@ -3552,8 +3819,9 @@
 #define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 
 /* or SDVOB */
+#define VLV_HDMIB 0x61140
 #define HDMIB   0xe1140
-#define  PORT_ENABLE    (1 << 31)
+#define  PORT_ENABLE    (1U << 31)
 #define  TRANSCODER(pipe)       ((pipe) << 30)
 #define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
 #define  TRANSCODER_MASK        (1 << 30)
@@ -3583,13 +3851,13 @@
 #define  LVDS_DETECTED	(1 << 1)
 
 #define BLC_PWM_CPU_CTL2	0x48250
-#define  PWM_ENABLE		(1 << 31)
+#define  PWM_ENABLE		(1U << 31)
 #define  PWM_PIPE_A		(0 << 29)
 #define  PWM_PIPE_B		(1 << 29)
 #define BLC_PWM_CPU_CTL		0x48254
 
 #define BLC_PWM_PCH_CTL1	0xc8250
-#define  PWM_PCH_ENABLE		(1 << 31)
+#define  PWM_PCH_ENABLE		(1U << 31)
 #define  PWM_POLARITY_ACTIVE_LOW	(1 << 29)
 #define  PWM_POLARITY_ACTIVE_HIGH	(0 << 29)
 #define  PWM_POLARITY_ACTIVE_LOW2	(1 << 28)
@@ -3611,8 +3879,8 @@
 #define  PANEL_PORT_SELECT_LVDS	(0 << 30)
 #define  PANEL_PORT_SELECT_DPA	(1 << 30)
 #define  EDP_PANEL		(1 << 30)
-#define  PANEL_PORT_SELECT_DPC	(2 << 30)
-#define  PANEL_PORT_SELECT_DPD	(3 << 30)
+#define  PANEL_PORT_SELECT_DPC	(2U << 30)
+#define  PANEL_PORT_SELECT_DPD	(3U << 30)
 #define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_UP_DELAY_SHIFT	16
 #define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
@@ -3717,6 +3985,8 @@
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
 
 #define  FORCEWAKE				0xA18C
+#define  FORCEWAKE_VLV				0x1300b0
+#define  FORCEWAKE_ACK_VLV			0x1300b4
 #define  FORCEWAKE_ACK				0x130090
 #define  FORCEWAKE_MT				0xa188 /* multi-threaded */
 #define  FORCEWAKE_MT_ACK			0x130040
@@ -3734,6 +4004,7 @@
 
 #define GEN6_UCGCTL1				0x9400
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE			(1 << 7)
 
 #define GEN6_UCGCTL2				0x9404
 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
@@ -3811,9 +4082,14 @@
 #define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
 #define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
 #define  GEN6_PM_DEFERRED_EVENTS     (GEN6_PM_RP_UP_THRESHOLD | \
-                  GEN6_PM_RP_DOWN_THRESHOLD | \
-                  GEN6_PM_RP_DOWN_TIMEOUT)
+						 GEN6_PM_RP_DOWN_THRESHOLD | \
+						 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define GEN6_GT_GFX_RC6				0x138108
+#define GEN6_GT_GFX_RC6p			0x13810C
+#define GEN6_GT_GFX_RC6pp			0x138110
+
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
 #define   GEN6_READ_OC_PARAMS			0xc
@@ -3873,4 +4149,197 @@
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1		0x45400		/* BIOS */
+#define HSW_PWR_WELL_CTL2		0x45404		/* Driver */
+#define HSW_PWR_WELL_CTL3		0x45408		/* KVMR */
+#define HSW_PWR_WELL_CTL4		0x4540C		/* Debug */
+#define   HSW_PWR_WELL_ENABLE				(1U<<31)
+#define   HSW_PWR_WELL_STATE				(1<<30)
+#define HSW_PWR_WELL_CTL5		0x45410
+#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1U<<31)
+#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
+#define   HSW_PWR_WELL_FORCE_ON				(1<<19)
+#define HSW_PWR_WELL_CTL6		0x45414
+
+/* Per-pipe DDI Function Control */
+#define PIPE_DDI_FUNC_CTL_A			0x60400
+#define PIPE_DDI_FUNC_CTL_B			0x61400
+#define PIPE_DDI_FUNC_CTL_C			0x62400
+#define PIPE_DDI_FUNC_CTL_EDP		0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
+					PIPE_DDI_FUNC_CTL_A, \
+					PIPE_DDI_FUNC_CTL_B)
+#define  PIPE_DDI_FUNC_ENABLE		(1U<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define  PIPE_DDI_PORT_MASK				(0xf<<28)
+#define  PIPE_DDI_SELECT_PORT(x)		((x)<<28)
+#define  PIPE_DDI_MODE_SELECT_HDMI		(0<<24)
+#define  PIPE_DDI_MODE_SELECT_DVI		(1<<24)
+#define  PIPE_DDI_MODE_SELECT_DP_SST	(2<<24)
+#define  PIPE_DDI_MODE_SELECT_DP_MST	(3<<24)
+#define  PIPE_DDI_MODE_SELECT_FDI		(4<<24)
+#define  PIPE_DDI_BPC_8					(0<<20)
+#define  PIPE_DDI_BPC_10				(1<<20)
+#define  PIPE_DDI_BPC_6					(2<<20)
+#define  PIPE_DDI_BPC_12				(3<<20)
+#define  PIPE_DDI_BFI_ENABLE			(1<<4)
+#define  PIPE_DDI_PORT_WIDTH_X1			(0<<1)
+#define  PIPE_DDI_PORT_WIDTH_X2			(1<<1)
+#define  PIPE_DDI_PORT_WIDTH_X4			(3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A			0x64040
+#define DP_TP_CTL_B			0x64140
+#define DP_TP_CTL(port) _PORT(port, \
+					DP_TP_CTL_A, \
+					DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE		(1U<<31)
+#define  DP_TP_CTL_MODE_SST	(0<<27)
+#define  DP_TP_CTL_MODE_MST	(1<<27)
+#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
+#define  DP_TP_CTL_FDI_AUTOTRAIN	(1<<15)
+#define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL	(3<<8)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A			0x64044
+#define DP_TP_STATUS_B			0x64144
+#define DP_TP_STATUS(port) _PORT(port, \
+					DP_TP_STATUS_A, \
+					DP_TP_STATUS_B)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A				0x64000
+#define DDI_BUF_CTL_B				0x64100
+#define DDI_BUF_CTL(port) _PORT(port, \
+					DDI_BUF_CTL_A, \
+					DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE				(1U<<31)
+#define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
+#define  DDI_BUF_EMP_400MV_3_5DB_HSW	(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
+#define  DDI_BUF_EMP_400MV_9_5DB_HSW	(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_600MV_0DB_HSW		(4<<24)   /* Sel4 */
+#define  DDI_BUF_EMP_600MV_3_5DB_HSW	(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
+#define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
+#define  DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
+#define  DDI_BUF_EMP_MASK				(0xf<<24)
+#define  DDI_BUF_IS_IDLE				(1<<7)
+#define  DDI_PORT_WIDTH_X1				(0<<1)
+#define  DDI_PORT_WIDTH_X2				(1<<1)
+#define  DDI_PORT_WIDTH_X4				(3<<1)
+#define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A				0x64E00
+#define DDI_BUF_TRANS_B				0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, \
+					DDI_BUF_TRANS_A, \
+					DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR				0xC6000
+#define SBI_DATA				0xC6004
+#define SBI_CTL_STAT			0xC6008
+#define  SBI_CTL_OP_CRRD		(0x6<<8)
+#define  SBI_CTL_OP_CRWR		(0x7<<8)
+#define  SBI_RESPONSE_FAIL		(0x1<<1)
+#define  SBI_RESPONSE_SUCCESS	(0x0<<1)
+#define  SBI_BUSY				(0x1<<0)
+#define  SBI_READY				(0x0<<0)
+
+/* SBI offsets */
+#define  SBI_SSCDIVINTPHASE6		0x0600
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
+#define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
+#define   SBI_SSCDIVINTPHASE_DIR(x)			((x)<<15)
+#define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
+#define  SBI_SSCCTL					0x020c
+#define  SBI_SSCCTL6				0x060C
+#define   SBI_SSCCTL_DISABLE		(1<<0)
+#define  SBI_SSCAUXDIV6				0x0610
+#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
+#define  SBI_DBUFF0					0x2a00
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE				0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
+
+/* SPLL */
+#define SPLL_CTL				0x46020
+#define  SPLL_PLL_ENABLE		(1U<<31)
+#define  SPLL_PLL_SCC			(1<<28)
+#define  SPLL_PLL_NON_SCC		(2<<28)
+#define  SPLL_PLL_FREQ_810MHz	(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz	(1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1				0x46040
+#define WRPLL_CTL2				0x46060
+#define  WRPLL_PLL_ENABLE				(1U<<31)
+#define  WRPLL_PLL_SELECT_SSC			(0x01<<28)
+#define  WRPLL_PLL_SELECT_NON_SCC		(0x02<<28)
+#define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
+/* WRPLL divider programming */
+#define  WRPLL_DIVIDER_REFERENCE(x)		((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)			((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)		((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A			0x46100
+#define PORT_CLK_SEL_B			0x46104
+#define PORT_CLK_SEL(port) _PORT(port, \
+					PORT_CLK_SEL_A, \
+					PORT_CLK_SEL_B)
+#define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
+#define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
+#define  PORT_CLK_SEL_LCPLL_810		(2<<29)
+#define  PORT_CLK_SEL_SPLL			(3<<29)
+#define  PORT_CLK_SEL_WRPLL1		(4<<29)
+#define  PORT_CLK_SEL_WRPLL2		(5<<29)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A			0x46140
+#define PIPE_CLK_SEL_B			0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
+					PIPE_CLK_SEL_A, \
+					PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define  PIPE_CLK_SEL_DISABLED	(0x0<<29)
+#define  PIPE_CLK_SEL_PORT(x)	(((x)+1)<<29)
+
+/* LCPLL Control */
+#define LCPLL_CTL				0x130040
+#define  LCPLL_PLL_DISABLE		(1U<<31)
+#define  LCPLL_PLL_LOCK			(1<<30)
+#define  LCPLL_CD_CLOCK_DISABLE	(1<<25)
+#define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A		0x45270
+#define PIPE_WM_LINETIME_B		0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
+					PIPE_WM_LINETIME_A, \
+					PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK		(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)			((x))
+#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)		((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP				0xc2014
+#define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
+#define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
+#define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
+
 #endif /* _I915_REG_H_ */
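
The paired-register helpers added above (DDI_FUNC_CTL, DP_TP_CTL,
DP_TP_STATUS, DDI_BUF_CTL, DDI_BUF_TRANS, PORT_CLK_SEL, PIPE_CLK_SEL,
PIPE_WM_LINETIME) all lean on the two-anchor _PIPE/_PORT idiom. A minimal
sketch of how they expand; the _PIPE/_PORT definitions shown here are the
customary i915 ones, assumed rather than quoted from this commit:

    /* Assumed definitions (the usual i915 idiom). */
    #define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
    #define _PORT(port, a, b) ((a) + (port) * ((b) - (a)))

    /*
     * DDI_FUNC_CTL(0) -> 0x60400 (pipe A)
     * DDI_FUNC_CTL(1) -> 0x61400 (pipe B)
     * DDI_FUNC_CTL(2) -> 0x62400 (pipe C; the 0x1000 stride extends)
     * PIPE_DDI_FUNC_CTL_EDP (0x6F400) is off the linear stride and must
     * be special-cased by callers.
     */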

Modified: trunk/sys/dev/drm2/i915/i915_suspend.c
===================================================================
--- trunk/sys/dev/drm2/i915/i915_suspend.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/i915_suspend.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  *
  * Copyright 2008 (c) Intel Corporation
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/i915_suspend.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_suspend.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -42,7 +43,7 @@
 		return false;
 
 	if (HAS_PCH_SPLIT(dev))
-		dpll_reg = PCH_DPLL(pipe);
+		dpll_reg = _PCH_DPLL(pipe);
 	else
 		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
 
@@ -460,7 +461,7 @@
 		I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
 			   ~DPLL_VCO_ENABLE);
 		POSTING_READ(dpll_a_reg);
-		DRM_UDELAY(150);
+		udelay(150);
 	}
 	I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
 	I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
@@ -467,12 +468,12 @@
 	/* Actually enable it */
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
 	POSTING_READ(dpll_a_reg);
-	DRM_UDELAY(150);
+	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
 		POSTING_READ(_DPLL_A_MD);
 	}
-	DRM_UDELAY(150);
+	udelay(150);
 
 	/* Restore mode */
 	I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -529,7 +530,7 @@
 		I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
 			   ~DPLL_VCO_ENABLE);
 		POSTING_READ(dpll_b_reg);
-		DRM_UDELAY(150);
+		udelay(150);
 	}
 	I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
 	I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
@@ -536,12 +537,12 @@
 	/* Actually enable it */
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
 	POSTING_READ(dpll_b_reg);
-	DRM_UDELAY(150);
+	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
 		POSTING_READ(_DPLL_B_MD);
 	}
-	DRM_UDELAY(150);
+	udelay(150);
 
 	/* Restore mode */
 	I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -792,7 +793,7 @@
 	I915_WRITE(VGA1, dev_priv->saveVGA1);
 	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
 	POSTING_READ(VGA_PD);
-	DRM_UDELAY(150);
+	udelay(150);
 
 	i915_restore_vga(dev);
 }
@@ -802,11 +803,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	dev_priv->saveLBB = pci_read_config(dev->device, LBB, 1);
+	dev_priv->saveLBB = pci_read_config(dev->dev, LBB, 1);
 
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
 
+	DRM_LOCK(dev);
+
 	i915_save_display(dev);
 
 	/* Interrupt state */
@@ -844,6 +847,8 @@
 	for (i = 0; i < 3; i++)
 		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
+	DRM_UNLOCK(dev);
+
 	return 0;
 }
 
@@ -852,8 +857,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
+	pci_write_config(dev->dev, LBB, dev_priv->saveLBB, 1);
 
+	DRM_LOCK(dev);
 
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -873,23 +879,7 @@
 		I915_WRITE(IER, dev_priv->saveIER);
 		I915_WRITE(IMR, dev_priv->saveIMR);
 	}
-	DRM_UNLOCK(dev);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_init_clock_gating(dev);
-
-	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
-		intel_init_emon(dev);
-	}
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		gen6_enable_rps(dev_priv);
-		gen6_update_ring_freq(dev_priv);
-	}
-
-	DRM_LOCK(dev);
-
 	/* Cache mode state */
 	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
@@ -903,6 +893,8 @@
 	for (i = 0; i < 3; i++)
 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+	DRM_UNLOCK(dev);
+
 	intel_iic_reset(dev);
 
 	return 0;

Modified: trunk/sys/dev/drm2/i915/intel_bios.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_bios.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_bios.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006 Intel Corporation
  *
@@ -23,7 +24,7 @@
  * Authors:
  *    Eric Anholt <eric at anholt.net>
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_bios.c 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/i915/intel_bios.c 280369 2015-03-23 13:38:33Z kib $
  */
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -175,6 +176,28 @@
 	return (const struct lvds_dvo_timing *)(entry + dvo_timing_offset);
 }
 
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+		   const struct bdb_lvds_lfp_data *data,
+		   const struct bdb_lvds_lfp_data_ptrs *ptrs,
+		   int index)
+{
+	size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+	u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+	size_t ofs;
+
+	if (index >= DRM_ARRAY_SIZE(ptrs->ptr))
+		return NULL;
+	ofs = ptrs->ptr[index].fp_timing_offset;
+	if (ofs < data_ofs ||
+	    ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+		return NULL;
+	return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
 /* Try to find integrated panel data */
 static void
 parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -184,6 +207,7 @@
 	const struct bdb_lvds_lfp_data *lvds_lfp_data;
 	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
 	const struct lvds_dvo_timing *panel_dvo_timing;
+	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
 	int i, downclock;
 
@@ -244,6 +268,19 @@
 			      "Normal Clock %dKHz, downclock %dKHz\n",
 			  panel_fixed_mode->clock, 10 * downclock);
 	}
+
+	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+				       lvds_lfp_data_ptrs,
+				       lvds_options->panel_type);
+	if (fp_timing) {
+		/* check the resolution, just to be sure */
+		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
+			dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+				      dev_priv->bios_lvds_val);
+		}
+	}
 }
 
 /* Try to find sdvo panel data */
@@ -256,6 +293,11 @@
 	int index;
 
 	index = i915_vbt_sdvo_panel_type;
+	if (index == -2) {
+		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+		return;
+	}
+
 	if (index == -1) {
 		struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 
@@ -331,11 +373,11 @@
 		if (block_size >= sizeof(*general)) {
 			int bus_pin = general->crt_ddc_gmbus_pin;
 			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-			if (bus_pin >= 1 && bus_pin <= 6)
+			if (intel_gmbus_is_port_valid(bus_pin))
 				dev_priv->crt_ddc_pin = bus_pin;
 		} else {
 			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
-				  block_size);
+				      block_size);
 		}
 	}
 }
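
get_lvds_fp_timing() above range-checks each fp_timing_offset before
dereferencing it, since the offsets come straight from BIOS-supplied VBT
data: entries that start before the LFP data block or run past its
advertised size are rejected. The bounds test reduces to the generic
pattern below; vbt_entry() is a hypothetical helper written only for
illustration:

    #include <stddef.h>

    /* Hypothetical: validate an entry at 'ofs' inside a blob of
     * 'blob_size' bytes before handing out a pointer into it. */
    static const void *
    vbt_entry(const unsigned char *blob, size_t blob_size,
        size_t ofs, size_t entry_size)
    {
    	if (ofs > blob_size || entry_size > blob_size - ofs)
    		return (NULL);	/* starts or ends out of bounds */
    	return (blob + ofs);
    }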

Modified: trunk/sys/dev/drm2/i915/intel_bios.h
===================================================================
--- trunk/sys/dev/drm2/i915/intel_bios.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_bios.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006 Intel Corporation
  *
@@ -23,7 +24,7 @@
  * Authors:
  *    Eric Anholt <eric at anholt.net>
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_bios.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/i915/intel_bios.h 235783 2012-05-22 11:07:44Z kib $
  */
 
 #ifndef _I830_BIOS_H_

Modified: trunk/sys/dev/drm2/i915/intel_crt.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_crt.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_crt.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2007 Intel Corporation
  *
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_crt.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_crt.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -55,18 +56,36 @@
 			    struct intel_crt, base);
 }
 
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp, reg;
+	u32 temp;
 
-	if (HAS_PCH_SPLIT(dev))
-		reg = PCH_ADPA;
-	else
-		reg = ADPA;
+	temp = I915_READ(PCH_ADPA);
+	temp &= ~ADPA_DAC_ENABLE;
 
-	temp = I915_READ(reg);
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		temp |= ADPA_DAC_ENABLE;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		/* Just leave port enable cleared */
+		break;
+	}
+
+	I915_WRITE(PCH_ADPA, temp);
+}
+
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 temp;
+
+	temp = I915_READ(ADPA);
 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
 
@@ -85,7 +104,7 @@
 		break;
 	}
 
-	I915_WRITE(reg, temp);
+	I915_WRITE(ADPA, temp);
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -111,7 +130,7 @@
 }
 
 static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct drm_display_mode *adjusted_mode)
 {
 	return true;
@@ -279,9 +298,10 @@
 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
 		struct edid *edid;
 		bool is_digital = false;
+		device_t iic;
 
-		edid = drm_get_edid(connector,
-		    dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+		iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+		edid = drm_get_edid(connector, iic);
 		/*
 		 * This may be a DVI-I connector with a shared DDC
 		 * link between analog and digital outputs, so we
@@ -291,7 +311,6 @@
 		 */
 		if (edid != NULL) {
 			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
-			connector->display_info.raw_edid = NULL;
 			free(edid, DRM_MEM_KMS);
 		}
 
@@ -480,15 +499,16 @@
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
+	device_t iic;
 
-	ret = intel_ddc_get_modes(connector,
-	    dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+	iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+	ret = intel_ddc_get_modes(connector, iic);
 	if (ret || !IS_G4X(dev))
 		return ret;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	return (intel_ddc_get_modes(connector,
-	    dev_priv->gmbus[GMBUS_PORT_DPB]));
+	iic = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+	return intel_ddc_get_modes(connector, iic);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -511,14 +531,22 @@
  * Routines for controlling stuff on the analog port
  */
 
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
-	.dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
 	.mode_fixup = intel_crt_mode_fixup,
 	.prepare = intel_encoder_prepare,
 	.commit = intel_encoder_commit,
 	.mode_set = intel_crt_mode_set,
+	.dpms = pch_crt_dpms,
 };
 
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+	.mode_fixup = intel_crt_mode_fixup,
+	.prepare = intel_encoder_prepare,
+	.commit = intel_encoder_commit,
+	.mode_set = intel_crt_mode_set,
+	.dpms = gmch_crt_dpms,
+};
+
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
 	.dpms = drm_helper_connector_dpms,
@@ -540,7 +568,7 @@
 
 static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
 {
-	DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
 	return 1;
 }
 
@@ -562,6 +590,7 @@
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
@@ -584,7 +613,11 @@
 	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
 				1 << INTEL_ANALOG_CLONE_BIT |
 				1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	crt->base.crtc_mask = (1 << 0) | (1 << 1);
+	if (IS_HASWELL(dev))
+		crt->base.crtc_mask = (1 << 0);
+	else
+		crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
 	if (IS_GEN2(dev))
 		connector->interlace_allowed = 0;
 	else
@@ -591,7 +624,12 @@
 		connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+	if (HAS_PCH_SPLIT(dev))
+		encoder_helper_funcs = &pch_encoder_funcs;
+	else
+		encoder_helper_funcs = &gmch_encoder_funcs;
+
+	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 #if 0
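
The intel_crt_dpms() split above exists because PCH_ADPA only offers a DAC
enable bit, while the GMCH-era ADPA register can also gate HSYNC and VSYNC
individually for the intermediate DPMS states. Summarizing the two paths;
the GMCH standby/suspend rows follow the pre-existing switch body, which
this diff keeps as unchanged context:

    mode          gmch_crt_dpms (ADPA)            pch_crt_dpms (PCH_ADPA)
    DPMS_ON       DAC on, both syncs running      DAC on
    DPMS_STANDBY  DAC on, HSYNC disabled          DAC off
    DPMS_SUSPEND  DAC on, VSYNC disabled          DAC off
    DPMS_OFF      DAC off, both syncs disabled    DAC off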

Modified: trunk/sys/dev/drm2/i915/intel_display.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_display.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_display.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2007 Intel Corporation
  *
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_display.c 236931 2012-06-11 21:41:45Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_display.c 314667 2017-03-04 13:03:31Z avg $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -35,13 +36,11 @@
 #include <dev/drm2/drm_edid.h>
 #include <dev/drm2/drm_dp_helper.h>
 #include <dev/drm2/drm_crtc_helper.h>
-#include <sys/kdb.h>
 #include <sys/limits.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
@@ -357,6 +356,110 @@
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+	u32 val = 0;
+
+	mtx_lock(&dev_priv->dpio_lock);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO idle wait timed out\n");
+		goto out_unlock;
+	}
+
+	I915_WRITE(DPIO_REG, reg);
+	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+		   DPIO_BYTE);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO read wait timed out\n");
+		goto out_unlock;
+	}
+	val = I915_READ(DPIO_DATA);
+
+out_unlock:
+	mtx_unlock(&dev_priv->dpio_lock);
+	return val;
+}
+
+#if 0
+static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
+			     u32 val)
+{
+
+	mtx_lock(&dev_priv->dpio_lock);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO idle wait timed out\n");
+		goto out_unlock;
+	}
+
+	I915_WRITE(DPIO_DATA, val);
+	I915_WRITE(DPIO_REG, reg);
+	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+		   DPIO_BYTE);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+		DRM_ERROR("DPIO write wait timed out\n");
+
+out_unlock:
+	mtx_unlock(&dev_priv->dpio_lock);
+}
+#endif
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Reset the DPIO config */
+	I915_WRITE(DPIO_CTL, 0);
+	POSTING_READ(DPIO_CTL);
+	I915_WRITE(DPIO_CTL, 1);
+	POSTING_READ(DPIO_CTL);
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+		},
+	},
+	{ }	/* terminating entry */
+};
+
+static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
+			      unsigned int reg)
+{
+	unsigned int val;
+
+	/* use the module option value if specified */
+	if (i915_lvds_channel_mode > 0)
+		return i915_lvds_channel_mode == 2;
+
+	if (dmi_check_system(intel_dual_link_lvds))
+		return true;
+
+	if (dev_priv->lvds_val)
+		val = dev_priv->lvds_val;
+	else {
+		/* BIOS should set the proper LVDS register value at boot, but
+		 * in reality, it doesn't set the value when the lid is closed;
+		 * we need to check "the value to be set" in VBT when LVDS
+		 * register is uninitialized.
+		 */
+		val = I915_READ(reg);
+		if (!(val & ~LVDS_DETECTED))
+			val = dev_priv->bios_lvds_val;
+		dev_priv->lvds_val = val;
+	}
+	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 						int refclk)
 {
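
is_dual_link_lvds() above settles the single/dual channel question with a
fixed precedence rather than trusting the live register alone, and caches
the resolved value in dev_priv->lvds_val so later calls skip the register
read. A standalone sketch of that precedence; the names and the register
bit values are assumptions local to this example:

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_LVDS_CLKB_POWER_MASK	(3 << 4)	/* assumed bit values */
    #define EX_LVDS_CLKB_POWER_UP	(3 << 4)
    #define EX_LVDS_DETECTED	(1 << 1)

    static bool
    dual_link_lvds(int channel_mode_param, bool dmi_quirk,
        uint32_t reg_val, uint32_t vbt_val)
    {
    	if (channel_mode_param > 0)	/* 1: module parameter wins */
    		return (channel_mode_param == 2);
    	if (dmi_quirk)			/* 2: per-machine quirk table */
    		return (true);
    	if (!(reg_val & ~EX_LVDS_DETECTED))	/* 3: register never set up, */
    		reg_val = vbt_val;		/*    fall back to the VBT value */
    	return ((reg_val & EX_LVDS_CLKB_POWER_MASK) == EX_LVDS_CLKB_POWER_UP);
    }
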
@@ -365,8 +468,7 @@
 	const intel_limit_t *limit;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP) {
+		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
 			/* LVDS dual channel */
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -394,8 +496,7 @@
 	const intel_limit_t *limit;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
+		if (is_dual_link_lvds(dev_priv, LVDS))
 			/* LVDS with dual channel */
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
@@ -533,8 +634,7 @@
 		 * reliably set up different single/dual channel state, if we
 		 * even can.
 		 */
-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
+		if (is_dual_link_lvds(dev_priv, LVDS))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
@@ -703,6 +803,17 @@
 	return true;
 }
 
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 frame, frame_reg = PIPEFRAME(pipe);
+
+	frame = I915_READ(frame_reg);
+
+	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
 /**
  * intel_wait_for_vblank - wait for vblank on a given pipe
  * @dev: drm device
@@ -716,6 +827,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipestat_reg = PIPESTAT(pipe);
 
+	if (INTEL_INFO(dev)->gen >= 5) {
+		ironlake_wait_for_vblank(dev, pipe);
+		return;
+	}
+
 	/* Clear existing vblank status. Note this will clear any other
 	 * sticky status fields as well.
 	 *
@@ -769,15 +885,20 @@
 		    1, "915pip"))
 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
 	} else {
-		u32 last_line;
+		u32 last_line, line_mask;
 		int reg = PIPEDSL(pipe);
 		unsigned long timeout = jiffies + msecs_to_jiffies(100);
 
+		if (IS_GEN2(dev))
+			line_mask = DSL_LINEMASK_GEN2;
+		else
+			line_mask = DSL_LINEMASK_GEN3;
+
 		/* Wait for the display line to settle */
 		do {
-			last_line = I915_READ(reg) & DSL_LINEMASK;
+			last_line = I915_READ(reg) & line_mask;
 			DELAY(5000);
-		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+		} while (((I915_READ(reg) & line_mask) != last_line) &&
 			 time_after(timeout, jiffies));
 		if (time_after(jiffies, timeout))
 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -809,12 +930,22 @@
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-			   enum pipe pipe, bool state)
+			   struct intel_crtc *intel_crtc, bool state)
 {
 	int reg;
 	u32 val;
 	bool cur_state;
 
+	if (HAS_PCH_LPT(dev_priv->dev)) {
+		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+		return;
+	}
+
+	if (!intel_crtc->pch_pll) {
+		printf("asserting PCH PLL enabled with no PLL\n");
+		return;
+	}
+
 	if (HAS_PCH_CPT(dev_priv->dev)) {
 		u32 pch_dpll;
 
@@ -821,14 +952,11 @@
 		pch_dpll = I915_READ(PCH_DPLL_SEL);
 
 		/* Make sure the selected PLL is enabled to the transcoder */
-		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
-		    ("transcoder %d PLL not enabled\n", pipe));
-
-		/* Convert the transcoder pipe number to a pll pipe number */
-		pipe = (pch_dpll >> (4 * pipe)) & 1;
+		KASSERT(((pch_dpll >> (4 * intel_crtc->pipe)) & 8) != 0,
+		    ("transcoder %d PLL not enabled\n", intel_crtc->pipe));
 	}
 
-	reg = PCH_DPLL(pipe);
+	reg = intel_crtc->pch_pll->pll_reg;
 	val = I915_READ(reg);
 	cur_state = !!(val & DPLL_VCO_ENABLE);
 	if (cur_state != state)
@@ -845,9 +973,16 @@
 	u32 val;
 	bool cur_state;
 
-	reg = FDI_TX_CTL(pipe);
-	val = I915_READ(reg);
-	cur_state = !!(val & FDI_TX_ENABLE);
+	if (IS_HASWELL(dev_priv->dev)) {
+		/* On Haswell, DDI is used instead of FDI_TX_CTL */
+		reg = DDI_FUNC_CTL(pipe);
+		val = I915_READ(reg);
+		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+	} else {
+		reg = FDI_TX_CTL(pipe);
+		val = I915_READ(reg);
+		cur_state = !!(val & FDI_TX_ENABLE);
+	}
 	if (cur_state != state)
 		printf("FDI TX state assertion failure (expected %s, current %s)\n",
 		    state_string(state), state_string(cur_state));
@@ -862,9 +997,14 @@
 	u32 val;
 	bool cur_state;
 
-	reg = FDI_RX_CTL(pipe);
-	val = I915_READ(reg);
-	cur_state = !!(val & FDI_RX_ENABLE);
+	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+		return;
+	} else {
+		reg = FDI_RX_CTL(pipe);
+		val = I915_READ(reg);
+		cur_state = !!(val & FDI_RX_ENABLE);
+	}
 	if (cur_state != state)
 		printf("FDI RX state assertion failure (expected %s, current %s)\n",
 		    state_string(state), state_string(cur_state));
@@ -882,6 +1022,10 @@
 	if (dev_priv->info->gen == 5)
 		return;
 
+	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
+	if (IS_HASWELL(dev_priv->dev))
+		return;
+
 	reg = FDI_TX_CTL(pipe);
 	val = I915_READ(reg);
 	if (!(val & FDI_TX_PLL_ENABLE))
@@ -894,6 +1038,10 @@
 	int reg;
 	u32 val;
 
+	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+		return;
+	}
 	reg = FDI_RX_CTL(pipe);
 	val = I915_READ(reg);
 	if (!(val & FDI_RX_PLL_ENABLE))
@@ -1000,6 +1148,11 @@
 	u32 val;
 	bool enabled;
 
+	if (HAS_PCH_LPT(dev_priv->dev)) {
+		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
+		return;
+	}
+
 	val = I915_READ(PCH_DREF_CONTROL);
 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 			    DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1199,6 +1352,68 @@
 	POSTING_READ(reg);
 }
 
+/* SBI access */
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+{
+
+	mtx_lock(&dev_priv->dpio_lock);
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		goto out_unlock;
+	}
+
+	I915_WRITE(SBI_ADDR,
+			(reg << 16));
+	I915_WRITE(SBI_DATA,
+			value);
+	I915_WRITE(SBI_CTL_STAT,
+			SBI_BUSY |
+			SBI_CTL_OP_CRWR);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+		goto out_unlock;
+	}
+
+out_unlock:
+	mtx_unlock(&dev_priv->dpio_lock);
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+{
+	u32 value;
+
+	value = 0;
+	mtx_lock(&dev_priv->dpio_lock);
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		goto out_unlock;
+	}
+
+	I915_WRITE(SBI_ADDR,
+			(reg << 16));
+	I915_WRITE(SBI_CTL_STAT,
+			SBI_BUSY |
+			SBI_CTL_OP_CRRD);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+		goto out_unlock;
+	}
+
+	value = I915_READ(SBI_DATA);
+
+out_unlock:
+	mtx_unlock(&dev_priv->dpio_lock);
+	return value;
+}
+
 /**
  * intel_enable_pch_pll - enable PCH PLL
  * @dev_priv: i915 private structure
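
The usual pattern over these helpers is a read-modify-write. The sketch
below uses only the two functions added in this hunk plus the SBI register
names added to i915_reg.h earlier in this commit, and mirrors what
lpt_program_iclkip() later does to the SSC control register:

    u32 tmp;

    tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
    tmp &= ~SBI_SSCCTL_DISABLE;	/* re-enable spread spectrum */
    intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp);
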
@@ -1207,60 +1422,93 @@
  * The PCH PLL needs to be enabled before the PCH transcoder, since it
  * drives the transcoder clock.
  */
-static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
-				 enum pipe pipe)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
 {
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll;
 	int reg;
 	u32 val;
 
-	if (pipe > 1)
+	/* PCH PLLs only available on ILK, SNB and IVB */
+	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+	pll = intel_crtc->pch_pll;
+	if (pll == NULL)
 		return;
 
-	/* PCH only available on ILK+ */
-	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+	if (pll->refcount == 0) {
+		DRM_DEBUG_KMS("pll->refcount == 0\n");
+		return;
+	}
 
+	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+		      pll->pll_reg, pll->active, pll->on,
+		      intel_crtc->base.base.id);
+
 	/* PCH refclock must be enabled first */
 	assert_pch_refclk_enabled(dev_priv);
 
-	reg = PCH_DPLL(pipe);
+	if (pll->active++ && pll->on) {
+		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		return;
+	}
+
+	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+	reg = pll->pll_reg;
 	val = I915_READ(reg);
 	val |= DPLL_VCO_ENABLE;
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 	DELAY(200);
+
+	pll->on = true;
 }
 
-static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
-				  enum pipe pipe)
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
 {
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll = intel_crtc->pch_pll;
 	int reg;
-	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
-		pll_sel = TRANSC_DPLL_ENABLE;
+	u32 val;
 
-	if (pipe > 1)
-		return;
-
 	/* PCH only available on ILK+ */
 	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+	if (pll == NULL)
+		return;
 
-	/* Make sure transcoder isn't still depending on us */
-	assert_transcoder_disabled(dev_priv, pipe);
+	if (pll->refcount == 0) {
+		DRM_DEBUG_KMS("pll->refcount == 0\n");
+		return;
+	}
 
-	if (pipe == 0)
-		pll_sel |= TRANSC_DPLLA_SEL;
-	else if (pipe == 1)
-		pll_sel |= TRANSC_DPLLB_SEL;
+	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+		      pll->pll_reg, pll->active, pll->on,
+		      intel_crtc->base.base.id);
 
+	if (pll->active == 0) {
+		DRM_DEBUG_KMS("pll->active == 0\n");
+		assert_pch_pll_disabled(dev_priv, intel_crtc);
+		return;
+	}
 
-	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+	if (--pll->active) {
+		assert_pch_pll_enabled(dev_priv, intel_crtc);
 		return;
+	}
 
-	reg = PCH_DPLL(pipe);
+	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
+
+	/* Make sure transcoder isn't still depending on us */
+	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+
+	reg = pll->pll_reg;
 	val = I915_READ(reg);
 	val &= ~DPLL_VCO_ENABLE;
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 	DELAY(200);
+
+	pll->on = false;
 }
 
 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1274,17 +1522,19 @@
 	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
 
 	/* Make sure PCH DPLL is enabled */
-	assert_pch_pll_enabled(dev_priv, pipe);
+	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
 	assert_fdi_rx_enabled(dev_priv, pipe);
 
-
+	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+		return;
+	}
 	reg = TRANSCONF(pipe);
 	val = I915_READ(reg);
 	pipeconf_val = I915_READ(PIPECONF(pipe));
-
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/*
 		 * make the BPC in transcoder be consistent with
@@ -1420,7 +1670,7 @@
  * Plane regs are double buffered, going from enabled->disabled needs a
  * trigger in order to latch.  The display address reg provides this.
  */
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane)
 {
 	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1531,486 +1781,6 @@
 	disable_pch_hdmi(dev_priv, pipe, HDMID);
 }
 
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 fbc_ctl;
-
-	/* Disable compression */
-	fbc_ctl = I915_READ(FBC_CONTROL);
-	if ((fbc_ctl & FBC_CTL_EN) == 0)
-		return;
-
-	fbc_ctl &= ~FBC_CTL_EN;
-	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	/* Wait for compressing bit to clear */
-	if (_intel_wait_for(dev,
-	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
-	    1, "915fbd")) {
-		DRM_DEBUG_KMS("FBC idle timed out\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int cfb_pitch;
-	int plane, i;
-	u32 fbc_ctl, fbc_ctl2;
-
-	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-	if (fb->pitches[0] < cfb_pitch)
-		cfb_pitch = fb->pitches[0];
-
-	/* FBC_CTL wants 64B units */
-	cfb_pitch = (cfb_pitch / 64) - 1;
-	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
-	/* Clear old tags */
-	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
-		I915_WRITE(FBC_TAG + (i * 4), 0);
-
-	/* Set it up... */
-	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-	fbc_ctl2 |= plane;
-	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-	I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
-	/* enable it... */
-	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
-	if (IS_I945GM(dev))
-		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
-	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
-	fbc_ctl |= obj->fence_reg;
-	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
-		      cfb_pitch, crtc->y, intel_crtc->plane);
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
-	u32 dpfc_ctl;
-
-	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
-	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
-	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
-	/* enable it... */
-	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpfc_ctl;
-
-	/* Disable compression */
-	dpfc_ctl = I915_READ(DPFC_CONTROL);
-	if (dpfc_ctl & DPFC_CTL_EN) {
-		dpfc_ctl &= ~DPFC_CTL_EN;
-		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
-	}
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 blt_ecoskpd;
-
-	/* Make sure blitter notifies FBC of writes */
-	gen6_gt_force_wake_get(dev_priv);
-	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
-	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
-		GEN6_BLITTER_LOCK_SHIFT;
-	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
-	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
-			 GEN6_BLITTER_LOCK_SHIFT);
-	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	gen6_gt_force_wake_put(dev_priv);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
-	u32 dpfc_ctl;
-
-	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-	dpfc_ctl &= DPFC_RESERVED;
-	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-	/* Set persistent mode for front-buffer rendering, ala X. */
-	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
-	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
-	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
-	/* enable it... */
-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-	if (IS_GEN6(dev)) {
-		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-		sandybridge_blit_fbc_update(dev);
-	}
-
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpfc_ctl;
-
-	/* Disable compression */
-	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-	if (dpfc_ctl & DPFC_CTL_EN) {
-		dpfc_ctl &= ~DPFC_CTL_EN;
-		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
-	}
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->display.fbc_enabled)
-		return false;
-
-	return dev_priv->display.fbc_enabled(dev);
-}
-
-static void intel_fbc_work_fn(void *arg, int pending)
-{
-	struct intel_fbc_work *work = arg;
-	struct drm_device *dev = work->crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_LOCK(dev);
-	if (work == dev_priv->fbc_work) {
-		/* Double check that we haven't switched fb without cancelling
-		 * the prior work.
-		 */
-		if (work->crtc->fb == work->fb) {
-			dev_priv->display.enable_fbc(work->crtc,
-						     work->interval);
-
-			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-			dev_priv->cfb_fb = work->crtc->fb->base.id;
-			dev_priv->cfb_y = work->crtc->y;
-		}
-
-		dev_priv->fbc_work = NULL;
-	}
-	DRM_UNLOCK(dev);
-
-	free(work, DRM_MEM_KMS);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
-	u_int pending;
-
-	if (dev_priv->fbc_work == NULL)
-		return;
-
-	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
-	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc_work, so we can perform the cancellation
-	 * entirely asynchronously.
-	 */
-	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
-	    &pending) == 0)
-		/* tasklet was killed before being run, clean up */
-		free(dev_priv->fbc_work, DRM_MEM_KMS);
-
-	/* Mark the work as no longer wanted so that if it does
-	 * wake-up (because the work was already running and waiting
-	 * for our mutex), it will discover that is no longer
-	 * necessary to run.
-	 */
-	dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
-	struct intel_fbc_work *work;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->display.enable_fbc)
-		return;
-
-	intel_cancel_fbc_work(dev_priv);
-
-	work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
-	work->crtc = crtc;
-	work->fb = crtc->fb;
-	work->interval = interval;
-	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
-	    work);
-
-	dev_priv->fbc_work = work;
-
-	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
-	/* Delay the actual enabling to let pageflipping cease and the
-	 * display to settle before starting the compression. Note that
-	 * this delay also serves a second purpose: it allows for a
-	 * vblank to pass after disabling the FBC before we attempt
-	 * to modify the control registers.
-	 *
-	 * A more complicated solution would involve tracking vblanks
-	 * following the termination of the page-flipping sequence
-	 * and indeed performing the enable as a co-routine and not
-	 * waiting synchronously upon the vblank.
-	 */
-	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
-	    msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	intel_cancel_fbc_work(dev_priv);
-
-	if (!dev_priv->display.disable_fbc)
-		return;
-
-	dev_priv->display.disable_fbc(dev);
-	dev_priv->cfb_plane = -1;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time.  We
- * enable it if possible:
- *   - plane A only (on pre-965)
- *   - no pixel mulitply/line duplication
- *   - no alpha buffer discard
- *   - no dual wide
- *   - framebuffer <= 2048 in width, 1536 in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one.  It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void intel_update_fbc(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL, *tmp_crtc;
-	struct intel_crtc *intel_crtc;
-	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
-	int enable_fbc;
-
-	DRM_DEBUG_KMS("\n");
-
-	if (!i915_powersave)
-		return;
-
-	if (!I915_HAS_FBC(dev))
-		return;
-
-	/*
-	 * If FBC is already on, we just have to verify that we can
-	 * keep it that way...
-	 * Need to disable if:
-	 *   - more than one pipe is active
-	 *   - changing FBC params (stride, fence, mode)
-	 *   - new fb is too large to fit in compressed buffer
-	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
-	 */
-	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-		if (tmp_crtc->enabled && tmp_crtc->fb) {
-			if (crtc) {
-				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
-				goto out_disable;
-			}
-			crtc = tmp_crtc;
-		}
-	}
-
-	if (!crtc || crtc->fb == NULL) {
-		DRM_DEBUG_KMS("no output, disabling\n");
-		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
-		goto out_disable;
-	}
-
-	intel_crtc = to_intel_crtc(crtc);
-	fb = crtc->fb;
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	enable_fbc = i915_enable_fbc;
-	if (enable_fbc < 0) {
-		DRM_DEBUG_KMS("fbc set to per-chip default\n");
-		enable_fbc = 1;
-		if (INTEL_INFO(dev)->gen <= 6)
-			enable_fbc = 0;
-	}
-	if (!enable_fbc) {
-		DRM_DEBUG_KMS("fbc disabled per module param\n");
-		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
-		goto out_disable;
-	}
-	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
-		DRM_DEBUG_KMS("framebuffer too large, disabling "
-			      "compression\n");
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		goto out_disable;
-	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG_KMS("mode incompatible with compression, "
-			      "disabling\n");
-		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
-		goto out_disable;
-	}
-	if ((crtc->mode.hdisplay > 2048) ||
-	    (crtc->mode.vdisplay > 1536)) {
-		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
-		goto out_disable;
-	}
-	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
-		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
-		goto out_disable;
-	}
-	if (obj->tiling_mode != I915_TILING_X ||
-	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_NOT_TILED;
-		goto out_disable;
-	}
-
-	/* If the kernel debugger is active, always disable compression */
-	if (kdb_active)
-		goto out_disable;
-
-	/* If the scanout has not changed, don't modify the FBC settings.
-	 * Note that we make the fundamental assumption that the fb->obj
-	 * cannot be unpinned (and have its GTT offset and fence revoked)
-	 * without first being decoupled from the scanout and FBC disabled.
-	 */
-	if (dev_priv->cfb_plane == intel_crtc->plane &&
-	    dev_priv->cfb_fb == fb->base.id &&
-	    dev_priv->cfb_y == crtc->y)
-		return;
-
-	if (intel_fbc_enabled(dev)) {
-		/* We update FBC along two paths, after changing fb/crtc
-		 * configuration (modeswitching) and after page-flipping
-		 * finishes. For the latter, we know that not only did
-		 * we disable the FBC at the start of the page-flip
-		 * sequence, but also more than one vblank has passed.
-		 *
-		 * For the former case of modeswitching, it is possible
-		 * to switch between two FBC valid configurations
-		 * instantaneously so we do need to disable the FBC
-		 * before we can modify its control registers. We also
-		 * have to wait for the next vblank for that to take
-		 * effect. However, since we delay enabling FBC we can
-		 * assume that a vblank has passed since disabling and
-		 * that we can safely alter the registers in the deferred
-		 * callback.
-		 *
-		 * In the scenario that we go from a valid to invalid
-		 * and then back to valid FBC configuration we have
-		 * no strict enforcement that a vblank occurred since
-		 * disabling the FBC. However, along all current pipe
-		 * disabling paths we do need to wait for a vblank at
-		 * some point. And we wait before enabling FBC anyway.
-		 */
-		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		intel_disable_fbc(dev);
-	}
-
-	intel_enable_fbc(crtc, 500);
-	return;
-
-out_disable:
-	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev)) {
-		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		intel_disable_fbc(dev);
-	}
-}
-
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2052,19 +1822,17 @@
 	 * framebuffer compression.  For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined);
-		if (ret)
-			goto err_unpin;
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto err_unpin;
 
-		i915_gem_object_pin_fence(obj);
-	}
+	i915_gem_object_pin_fence(obj);
 
 	dev_priv->mm.interruptible = true;
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
 	return ret;
@@ -2073,7 +1841,7 @@
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_fence(obj);
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 }
 
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2139,7 +1907,7 @@
 		      Start, Offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPSURF(plane), Start);
+		I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPADDR(plane), Offset);
 	} else
@@ -2224,7 +1992,7 @@
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
 		      Start, Offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-	I915_WRITE(DSPSURF(plane), Start);
+	I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 	I915_WRITE(DSPADDR(plane), Offset);
 	POSTING_READ(reg);
@@ -2239,16 +2007,12 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
 
-	ret = dev_priv->display.update_plane(crtc, fb, x, y);
-	if (ret)
-		return ret;
-
-	intel_update_fbc(dev);
+	if (dev_priv->display.disable_fbc)
+		dev_priv->display.disable_fbc(dev);
 	intel_increase_pllclock(crtc);
 
-	return 0;
+	return dev_priv->display.update_plane(crtc, fb, x, y);
 }
 
 static int
@@ -2261,8 +2025,8 @@
 	int ret;
 
 	mtx_lock(&dev->event_lock);
-	while (!atomic_read(&dev_priv->mm.wedged) &&
-	    atomic_read(&obj->pending_flip) != 0) {
+	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
+	    atomic_load_acq_int(&obj->pending_flip) != 0) {
 		msleep(&obj->pending_flip, &dev->event_lock,
 		    0, "915flp", 0);
 	}
@@ -2287,11 +2051,8 @@
 		    struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-#if 0
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-#else
-	drm_i915_private_t *dev_priv = dev->dev_private;
-#endif
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int ret;
 
@@ -2301,16 +2062,10 @@
 		return 0;
 	}
 
-	switch (intel_crtc->plane) {
-	case 0:
-	case 1:
-		break;
-	case 2:
-		if (IS_IVYBRIDGE(dev))
-			break;
-		/* fall through otherwise */
-	default:
-		DRM_ERROR("no plane for crtc\n");
+	if (intel_crtc->plane > dev_priv->num_pipe) {
+		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+				intel_crtc->plane,
+				dev_priv->num_pipe);
 		return -EINVAL;
 	}
 
@@ -2327,8 +2082,7 @@
 	if (old_fb)
 		intel_finish_fb(old_fb);
 
-	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
-					 LEAVE_ATOMIC_MODE_SET);
+	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
 	if (ret) {
 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		DRM_UNLOCK(dev);
@@ -2341,9 +2095,9 @@
 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
 	}
 
+	intel_update_fbc(dev);
 	DRM_UNLOCK(dev);
 
-#if 0
 	if (!dev->primary->master)
 		return 0;
 
@@ -2358,20 +2112,7 @@
 		master_priv->sarea_priv->pipeA_x = x;
 		master_priv->sarea_priv->pipeA_y = y;
 	}
-#else
 
-	if (!dev_priv->sarea_priv)
-		return 0;
-
-	if (intel_crtc->pipe) {
-		dev_priv->sarea_priv->planeB_x = x;
-		dev_priv->sarea_priv->planeB_y = y;
-	} else {
-		dev_priv->sarea_priv->planeA_x = x;
-		dev_priv->sarea_priv->planeA_y = y;
-	}
-#endif
-
 	return 0;
 }
 
@@ -2576,7 +2317,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	u32 reg, temp, i;
+	u32 reg, temp, i, retry;
 
 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
@@ -2628,15 +2369,20 @@
 		POSTING_READ(reg);
 		DELAY(500);
 
-		reg = FDI_RX_IIR(pipe);
-		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-		if (temp & FDI_RX_BIT_LOCK) {
-			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			if (temp & FDI_RX_BIT_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+				DRM_DEBUG_KMS("FDI train 1 done.\n");
+				break;
+			}
+			DELAY(50);
+		}
+		if (retry < 5)
 			break;
-		}
 	}
 	if (i == 4)
 		DRM_ERROR("FDI train 1 fail!\n");
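
The rework above wraps the single FDI_RX_IIR check in a five-attempt
polling loop; the "retry < 5" test after the inner loop is what carries
success out of the outer voltage/pre-emphasis stepping loop, and the next
hunk applies the same shape to training pattern 2. In outline, where
program_train_level() and lock_bit_set() are hypothetical stand-ins for
the register writes and the FDI_RX_IIR test:

    int i, retry;

    for (i = 0; i < 4; i++) {	/* step the four vswing/emphasis levels */
    	program_train_level(i);
    	for (retry = 0; retry < 5; retry++) {
    		if (lock_bit_set())	/* FDI_RX_IIR & FDI_RX_BIT_LOCK */
    			break;		/* leaves retry < 5 */
    		DELAY(50);
    	}
    	if (retry < 5)		/* inner loop locked; stop stepping */
    		break;
    }
    if (i == 4)
    	DRM_ERROR("FDI train 1 fail!\n");
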
@@ -2677,15 +2423,20 @@
 		POSTING_READ(reg);
 		DELAY(500);
 
-		reg = FDI_RX_IIR(pipe);
-		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-		if (temp & FDI_RX_SYMBOL_LOCK) {
-			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			if (temp & FDI_RX_SYMBOL_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+				DRM_DEBUG_KMS("FDI train 2 done.\n");
+				break;
+			}
+			DELAY(50);
+		}
+		if (retry < 5)
 			break;
-		}
 	}
 	if (i == 4)
 		DRM_ERROR("FDI train 2 fail!\n");
@@ -2834,15 +2585,19 @@
 	POSTING_READ(reg);
 	DELAY(200);
 
-	/* Enable CPU FDI TX PLL, always on for Ironlake */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+	/* On Haswell, the PLL configuration for ports and pipes is handled
+	 * separately, as part of DDI setup */
+	if (!IS_HASWELL(dev)) {
+		/* Enable CPU FDI TX PLL, always on for Ironlake */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
 
-		POSTING_READ(reg);
-		DELAY(100);
-	}
+			POSTING_READ(reg);
+			DELAY(100);
+		}
+	}
 }
 
 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
@@ -2915,42 +2670,16 @@
 	DELAY(100);
 }
 
-/*
- * When we disable a pipe, we need to clear any pending scanline wait events
- * to avoid hanging the ring, which we assume we are waiting on.
- */
-static void intel_clear_scanline_wait(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	u32 tmp;
-
-	if (IS_GEN2(dev))
-		/* Can't break the hang on i8xx */
-		return;
-
-	ring = LP_RING(dev_priv);
-	tmp = I915_READ_CTL(ring);
-	if (tmp & RING_WAIT)
-		I915_WRITE_CTL(ring, tmp);
-}
-
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-	struct drm_i915_gem_object *obj;
-	struct drm_i915_private *dev_priv;
-	struct drm_device *dev;
+	struct drm_device *dev = crtc->dev;
 
 	if (crtc->fb == NULL)
 		return;
 
-	obj = to_intel_framebuffer(crtc->fb)->obj;
-	dev = crtc->dev;
-	dev_priv = dev->dev_private;
-	mtx_lock(&dev->event_lock);
-	while (atomic_read(&obj->pending_flip) != 0)
-		msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
-	mtx_unlock(&dev->event_lock);
+	DRM_LOCK(dev);
+	intel_finish_fb(crtc->fb);
+	DRM_UNLOCK(dev);
 }
 
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2967,6 +2696,23 @@
 		if (encoder->base.crtc != crtc)
 			continue;
 
+		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
+		 * CPU handles all others */
+		if (IS_HASWELL(dev)) {
+			/* It is still unclear how this will work on PPT, so throw up a warning */
+			if (!HAS_PCH_LPT(dev))
+				DRM_DEBUG_KMS("Haswell: PPT\n");
+
+			if (encoder->type == DRM_MODE_ENCODER_DAC) {
+				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming it is PCH\n");
+				return true;
+			} else {
+				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming it is CPU\n",
+						encoder->type);
+				return false;
+			}
+		}
+
 		switch (encoder->type) {
 		case INTEL_OUTPUT_EDP:
 			if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2978,6 +2724,99 @@
 	return true;
 }
 
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 divsel, phaseinc, auxdiv, phasedir = 0;
+	u32 temp;
+
+	/* It is necessary to ungate the pixclk gate prior to programming
+	 * the divisors, and gate it back when it is done.
+	 */
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+	/* Disable SSCCTL */
+	intel_sbi_write(dev_priv, SBI_SSCCTL6,
+				intel_sbi_read(dev_priv, SBI_SSCCTL6) |
+					SBI_SSCCTL_DISABLE);
+
+	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
+	if (crtc->mode.clock == 20000) {
+		auxdiv = 1;
+		divsel = 0x41;
+		phaseinc = 0x20;
+	} else {
+		/* The iCLK virtual clock root frequency is in MHz,
+		 * but the crtc->mode.clock is in KHz. To get the divisors,
+		 * one has to be divided by the other, so we express the
+		 * virtual clock in KHz here for higher precision.
+		 */
+		u32 iclk_virtual_root_freq = 172800 * 1000;
+		u32 iclk_pi_range = 64;
+		u32 desired_divisor, msb_divisor_value, pi_value;
+
+		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+		msb_divisor_value = desired_divisor / iclk_pi_range;
+		pi_value = desired_divisor % iclk_pi_range;
+
+		auxdiv = 0;
+		divsel = msb_divisor_value - 2;
+		phaseinc = pi_value;
+	}
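+	/* Worked example: a 148.5MHz mode (crtc->mode.clock == 148500)
+	 * gives desired_divisor = 172800000 / 148500 = 1163, hence
+	 * divsel = 1163 / 64 - 2 = 16 and phaseinc = 1163 % 64 = 11.
+	 * For the 20MHz corner case the raw divisor 8640 / 64 = 135 would
+	 * overflow the 7-bit DIVSEL field; using the auxiliary /2 divider
+	 * instead gives 8640 / 2 = 4320 = 67 * 64 + 32, i.e. the
+	 * hardcoded divsel = 0x41 and phaseinc = 0x20 above.
+	 */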
+
+	/* This should not happen with any sane values */
+	if ((SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+	     ~SBI_SSCDIVINTPHASE_DIVSEL_MASK))
+		DRM_DEBUG_KMS("DIVSEL_MASK");
+	if ((SBI_SSCDIVINTPHASE_DIR(phasedir) &
+	     ~SBI_SSCDIVINTPHASE_INCVAL_MASK))
+		DRM_DEBUG_KMS("INCVAL_MASK");
+
+	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+			crtc->mode.clock,
+			auxdiv,
+			divsel,
+			phasedir,
+			phaseinc);
+
+	/* Program SSCDIVINTPHASE6 */
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp);
+
+	/* Program SSCAUXDIV */
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp);
+
+	/* Enable modulator and associated divider */
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+	temp &= ~SBI_SSCCTL_DISABLE;
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp);
+
+	/* Wait for initialization time */
+	DELAY(24);
+
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
 /*
  * Enable PCH resources required for PCH ports:
  *   - PCH PLLs
@@ -2992,29 +2831,41 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	u32 reg, temp, transc_sel;
+	u32 reg, temp;
 
+	assert_transcoder_disabled(dev_priv, pipe);
+
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
 
-	intel_enable_pch_pll(dev_priv, pipe);
+	intel_enable_pch_pll(intel_crtc);
 
-	if (HAS_PCH_CPT(dev)) {
-		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
-			TRANSC_DPLLB_SEL;
+	if (HAS_PCH_LPT(dev)) {
+		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+		lpt_program_iclkip(crtc);
+	} else if (HAS_PCH_CPT(dev)) {
+		u32 sel;
 
-		/* Be sure PCH DPLL SEL is set */
 		temp = I915_READ(PCH_DPLL_SEL);
-		if (pipe == 0) {
-			temp &= ~(TRANSA_DPLLB_SEL);
-			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
-		} else if (pipe == 1) {
-			temp &= ~(TRANSB_DPLLB_SEL);
-			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-		} else if (pipe == 2) {
-			temp &= ~(TRANSC_DPLLB_SEL);
-			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+		switch (pipe) {
+		default:
+		case 0:
+			temp |= TRANSA_DPLL_ENABLE;
+			sel = TRANSA_DPLLB_SEL;
+			break;
+		case 1:
+			temp |= TRANSB_DPLL_ENABLE;
+			sel = TRANSB_DPLLB_SEL;
+			break;
+		case 2:
+			temp |= TRANSC_DPLL_ENABLE;
+			sel = TRANSC_DPLLB_SEL;
+			break;
 		}
+		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+			temp |= sel;
+		else
+			temp &= ~sel;
 		I915_WRITE(PCH_DPLL_SEL, temp);
 	}
 
@@ -3029,7 +2880,8 @@
 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
 
-	intel_fdi_normal_train(crtc);
+	if (!IS_HASWELL(dev))
+		intel_fdi_normal_train(crtc);
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
@@ -3072,6 +2924,93 @@
 	intel_enable_transcoder(dev_priv, pipe);
 }
 
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+	struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+	if (pll == NULL)
+		return;
+
+	if (pll->refcount == 0) {
+		printf("bad PCH PLL refcount\n");
+		return;
+	}
+
+	--pll->refcount;
+	intel_crtc->pch_pll = NULL;
+}
+
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll;
+	int i;
+
+	pll = intel_crtc->pch_pll;
+	if (pll) {
+		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+			      intel_crtc->base.base.id, pll->pll_reg);
+		goto prepare;
+	}
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+		i = intel_crtc->pipe;
+		pll = &dev_priv->pch_plls[i];
+
+		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+			      intel_crtc->base.base.id, pll->pll_reg);
+
+		goto found;
+	}
+
+	for (i = 0; i < dev_priv->num_pch_pll; i++) {
+		pll = &dev_priv->pch_plls[i];
+
+		/* Only want to check enabled timings first */
+		if (pll->refcount == 0)
+			continue;
+
+		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+		    fp == I915_READ(pll->fp0_reg)) {
+			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n",
+				      intel_crtc->base.base.id,
+				      pll->pll_reg, pll->refcount, pll->active);
+
+			goto found;
+		}
+	}
+
+	/* Ok no matching timings, maybe there's a free one? */
+	for (i = 0; i < dev_priv->num_pch_pll; i++) { /* XXXKIB: HACK */
+		pll = &dev_priv->pch_plls[i];
+		if (pll->refcount == 0) {
+			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+				      intel_crtc->base.base.id, pll->pll_reg);
+			goto found;
+		}
+	}
+
+	return NULL;
+
+found:
+	intel_crtc->pch_pll = pll;
+	pll->refcount++;
+	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+	/* Wait for the clocks to stabilize before rewriting the regs */
+	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(pll->pll_reg);
+	DELAY(150);
+
+	I915_WRITE(pll->fp0_reg, fp);
+	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+	pll->on = false;
+	return pll;
+}
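+
+/*
+ * PLL slots are reference counted: intel_get_pch_pll() bumps
+ * pll->refcount for the CRTC that acquires a slot, and
+ * intel_put_pch_pll() drops it again from the crtc-off path, so a
+ * slot is only handed out as free once no CRTC references it.
+ */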
+
 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3213,8 +3152,7 @@
 	}
 
 	/* disable PCH DPLL */
-	if (!intel_crtc->no_pll)
-		intel_disable_pch_pll(dev_priv, pipe);
+	intel_disable_pch_pll(intel_crtc);
 
 	/* Switch from PCDclk to Rawclk */
 	reg = FDI_RX_CTL(pipe);
@@ -3242,7 +3180,6 @@
 
 	DRM_LOCK(dev);
 	intel_update_fbc(dev);
-	intel_clear_scanline_wait(dev);
 	DRM_UNLOCK(dev);
 }
 
@@ -3270,6 +3207,12 @@
 	}
 }
 
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	intel_put_pch_pll(intel_crtc);
+}
+
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
 	if (!enable && intel_crtc->overlay) {
@@ -3341,7 +3284,6 @@
 	intel_crtc->active = false;
 	intel_update_fbc(dev);
 	intel_update_watermarks(dev);
-	intel_clear_scanline_wait(dev);
 }
 
 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3361,6 +3303,10 @@
 	}
 }
 
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
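+	/* Nothing to release: pre-PCH platforms have no shared PCH PLLs. */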
+}
+
 /**
  * Sets the power management mode of the pipe and plane.
  */
@@ -3368,9 +3314,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-#if 0
 	struct drm_i915_master_private *master_priv;
-#endif
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	bool enabled;
@@ -3382,7 +3326,6 @@
 
 	dev_priv->display.dpms(crtc, mode);
 
-#if 0
 	if (!dev->primary->master)
 		return;
 
@@ -3389,31 +3332,17 @@
 	master_priv = dev->primary->master->driver_priv;
 	if (!master_priv->sarea_priv)
 		return;
-#else
-	if (!dev_priv->sarea_priv)
-		return;
-#endif
 
 	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
 
 	switch (pipe) {
 	case 0:
-#if 0
 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
 		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
-#else
-		dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
-		dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
-#endif
 		break;
 	case 1:
-#if 0
 		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
 		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
-#else
-		dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
-		dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
-#endif
 		break;
 	default:
 		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
@@ -3425,26 +3354,12 @@
 {
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Flush any pending WAITs before we disable the pipe. Note that
-	 * we need to drop the struct_mutex in order to acquire it again
-	 * during the lowlevel dpms routines around a couple of the
-	 * operations. It does not look trivial nor desirable to move
-	 * that locking higher. So instead we leave a window for the
-	 * submission of further commands on the fb before we can actually
-	 * disable it. This race with userspace exists anyway, and we can
-	 * only rely on the pipe being disabled by userspace after it
-	 * receives the hotplug notification and has flushed any pending
-	 * batches.
-	 */
-	if (crtc->fb) {
-		DRM_LOCK(dev);
-		intel_finish_fb(crtc->fb);
-		DRM_UNLOCK(dev);
-	}
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	dev_priv->display.off(crtc);
 
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
 	if (crtc->fb) {
@@ -3493,8 +3408,7 @@
 {
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 	struct drm_device *dev = encoder->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 
 	/* lvds has its own version of commit see intel_lvds_commit */
 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3512,7 +3426,7 @@
 }
 
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3532,6 +3446,11 @@
 	return true;
 }
 
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+	return 400000; /* FIXME */
+}
+
 static int i945_get_display_clock_speed(struct drm_device *dev)
 {
 	return 400000;
@@ -3551,7 +3470,7 @@
 {
 	u16 gcfgc = 0;
 
-	gcfgc = pci_read_config(dev->device, GCFGC, 2);
+	gcfgc = pci_read_config(dev->dev, GCFGC, 2);
 
 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
 		return 133000;
@@ -3629,1341 +3548,6 @@
 	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
-
-struct intel_watermark_params {
-	unsigned long fifo_size;
-	unsigned long max_wm;
-	unsigned long default_wm;
-	unsigned long guard_size;
-	unsigned long cacheline_size;
-};
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_HPLLOFF_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_cursor_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params g4x_wm_info = {
-	G4X_FIFO_SIZE,
-	G4X_MAX_WM,
-	G4X_MAX_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i965_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	I915_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i945_wm_info = {
-	I945_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i915_wm_info = {
-	I915_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i855_wm_info = {
-	I855GM_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i830_wm_info = {
-	I830_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params ironlake_display_wm_info = {
-	ILK_DISPLAY_FIFO,
-	ILK_DISPLAY_MAXWM,
-	ILK_DISPLAY_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
-	ILK_CURSOR_FIFO,
-	ILK_CURSOR_MAXWM,
-	ILK_CURSOR_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
-	ILK_DISPLAY_SR_FIFO,
-	ILK_DISPLAY_MAX_SRWM,
-	ILK_DISPLAY_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
-	ILK_CURSOR_SR_FIFO,
-	ILK_CURSOR_MAX_SRWM,
-	ILK_CURSOR_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
-	SNB_DISPLAY_FIFO,
-	SNB_DISPLAY_MAXWM,
-	SNB_DISPLAY_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
-	SNB_CURSOR_FIFO,
-	SNB_CURSOR_MAXWM,
-	SNB_CURSOR_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
-	SNB_DISPLAY_SR_FIFO,
-	SNB_DISPLAY_MAX_SRWM,
-	SNB_DISPLAY_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
-	SNB_CURSOR_SR_FIFO,
-	SNB_CURSOR_MAX_SRWM,
-	SNB_CURSOR_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again).  Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size.  When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point.  If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
-					const struct intel_watermark_params *wm,
-					int fifo_size,
-					int pixel_size,
-					unsigned long latency_ns)
-{
-	long entries_required, wm_size;
-
-	/*
-	 * Note: we need to make sure we don't overflow for various clock &
-	 * latency values.
-	 * clocks go from a few thousand to several hundred thousand.
-	 * latency is usually a few thousand
-	 */
-	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
-		1000;
-	entries_required = howmany(entries_required, wm->cacheline_size);
-
-	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
-	wm_size = fifo_size - (entries_required + wm->guard_size);
-
-	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
-
-	/* Don't promote wm_size to unsigned... */
-	if (wm_size > (long)wm->max_wm)
-		wm_size = wm->max_wm;
-	if (wm_size <= 0)
-		wm_size = wm->default_wm;
-	return wm_size;
-}
-
-struct cxsr_latency {
-	int is_desktop;
-	int is_ddr3;
-	unsigned long fsb_freq;
-	unsigned long mem_freq;
-	unsigned long display_sr;
-	unsigned long display_hpll_disable;
-	unsigned long cursor_sr;
-	unsigned long cursor_hpll_disable;
-};
-
-static const struct cxsr_latency cxsr_latency_table[] = {
-	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
-	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
-	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
-	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
-	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
-
-	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
-	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
-	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
-	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
-	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
-
-	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
-	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
-	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
-	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
-	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
-
-	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
-	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
-	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
-	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
-	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
-
-	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
-	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
-	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
-	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
-	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
-
-	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
-	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
-	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
-	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
-	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
-							 int is_ddr3,
-							 int fsb,
-							 int mem)
-{
-	const struct cxsr_latency *latency;
-	int i;
-
-	if (fsb == 0 || mem == 0)
-		return NULL;
-
-	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
-		latency = &cxsr_latency_table[i];
-		if (is_desktop == latency->is_desktop &&
-		    is_ddr3 == latency->is_ddr3 &&
-		    fsb == latency->fsb_freq && mem == latency->mem_freq)
-			return latency;
-	}
-
-	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
-	return NULL;
-}
-
-static void pineview_disable_cxsr(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* deactivate cxsr */
-	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- *   - memory configuration (speed, channels)
- *   - chipset
- *   - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value.  It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
-
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	if (plane)
-		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A", size);
-
-	return size;
-}
-
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x1ff;
-	if (plane)
-		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
-	size >>= 1; /* Convert to cachelines */
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A", size);
-
-	return size;
-}
-
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	size >>= 2; /* Convert to cachelines */
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A",
-		      size);
-
-	return size;
-}
-
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	size >>= 1; /* Convert to cachelines */
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A", size);
-
-	return size;
-}
-
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
-{
-	struct drm_crtc *crtc, *enabled = NULL;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->enabled && crtc->fb) {
-			if (enabled)
-				return NULL;
-			enabled = crtc;
-		}
-	}
-
-	return enabled;
-}
-
-static void pineview_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	const struct cxsr_latency *latency;
-	u32 reg;
-	unsigned long wm;
-
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
-					 dev_priv->fsb_freq, dev_priv->mem_freq);
-	if (!latency) {
-		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
-		return;
-	}
-
-	crtc = single_enabled_crtc(dev);
-	if (crtc) {
-		int clock = crtc->mode.clock;
-		int pixel_size = crtc->fb->bits_per_pixel / 8;
-
-		/* Display SR */
-		wm = intel_calculate_wm(clock, &pineview_display_wm,
-					pineview_display_wm.fifo_size,
-					pixel_size, latency->display_sr);
-		reg = I915_READ(DSPFW1);
-		reg &= ~DSPFW_SR_MASK;
-		reg |= wm << DSPFW_SR_SHIFT;
-		I915_WRITE(DSPFW1, reg);
-		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
-		/* cursor SR */
-		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
-					pineview_display_wm.fifo_size,
-					pixel_size, latency->cursor_sr);
-		reg = I915_READ(DSPFW3);
-		reg &= ~DSPFW_CURSOR_SR_MASK;
-		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
-		I915_WRITE(DSPFW3, reg);
-
-		/* Display HPLL off SR */
-		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
-					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->display_hpll_disable);
-		reg = I915_READ(DSPFW3);
-		reg &= ~DSPFW_HPLL_SR_MASK;
-		reg |= wm & DSPFW_HPLL_SR_MASK;
-		I915_WRITE(DSPFW3, reg);
-
-		/* cursor HPLL off SR */
-		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
-					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->cursor_hpll_disable);
-		reg = I915_READ(DSPFW3);
-		reg &= ~DSPFW_HPLL_CURSOR_MASK;
-		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
-		I915_WRITE(DSPFW3, reg);
-		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
-		/* activate cxsr */
-		I915_WRITE(DSPFW3,
-			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-		DRM_DEBUG_KMS("Self-refresh is enabled\n");
-	} else {
-		pineview_disable_cxsr(dev);
-		DRM_DEBUG_KMS("Self-refresh is disabled\n");
-	}
-}
-
-static bool g4x_compute_wm0(struct drm_device *dev,
-			    int plane,
-			    const struct intel_watermark_params *display,
-			    int display_latency_ns,
-			    const struct intel_watermark_params *cursor,
-			    int cursor_latency_ns,
-			    int *plane_wm,
-			    int *cursor_wm)
-{
-	struct drm_crtc *crtc;
-	int htotal, hdisplay, clock, pixel_size;
-	int line_time_us, line_count;
-	int entries, tlb_miss;
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	if (crtc->fb == NULL || !crtc->enabled) {
-		*cursor_wm = cursor->guard_size;
-		*plane_wm = display->guard_size;
-		return false;
-	}
-
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
-	pixel_size = crtc->fb->bits_per_pixel / 8;
-
-	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
-	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
-	if (tlb_miss > 0)
-		entries += tlb_miss;
-	entries = howmany(entries, display->cacheline_size);
-	*plane_wm = entries + display->guard_size;
-	if (*plane_wm > (int)display->max_wm)
-		*plane_wm = display->max_wm;
-
-	/* Use the large buffer method to calculate cursor watermark */
-	line_time_us = ((htotal * 1000) / clock);
-	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-	entries = line_count * 64 * pixel_size;
-	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
-	if (tlb_miss > 0)
-		entries += tlb_miss;
-	entries = howmany(entries, cursor->cacheline_size);
-	*cursor_wm = entries + cursor->guard_size;
-	if (*cursor_wm > (int)cursor->max_wm)
-		*cursor_wm = (int)cursor->max_wm;
-
-	return true;
-}
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool g4x_check_srwm(struct drm_device *dev,
-			   int display_wm, int cursor_wm,
-			   const struct intel_watermark_params *display,
-			   const struct intel_watermark_params *cursor)
-{
-	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
-		      display_wm, cursor_wm);
-
-	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
-			      display_wm, display->max_wm);
-		return false;
-	}
-
-	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
-			      cursor_wm, cursor->max_wm);
-		return false;
-	}
-
-	if (!(display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
-		return false;
-	}
-
-	return true;
-}
-
-static bool g4x_compute_srwm(struct drm_device *dev,
-			     int plane,
-			     int latency_ns,
-			     const struct intel_watermark_params *display,
-			     const struct intel_watermark_params *cursor,
-			     int *display_wm, int *cursor_wm)
-{
-	struct drm_crtc *crtc;
-	int hdisplay, htotal, pixel_size, clock;
-	unsigned long line_time_us;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
-	pixel_size = crtc->fb->bits_per_pixel / 8;
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = howmany(min(small, large), display->cacheline_size);
-	*display_wm = entries + display->guard_size;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = howmany(entries, cursor->cacheline_size);
-	*cursor_wm = entries + cursor->guard_size;
-
-	return g4x_check_srwm(dev,
-			      *display_wm, *cursor_wm,
-			      display, cursor);
-}
-
-#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
-
-static void g4x_update_wm(struct drm_device *dev)
-{
-	static const int sr_latency_ns = 12000;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
-	int plane_sr, cursor_sr;
-	unsigned int enabled = 0;
-
-	if (g4x_compute_wm0(dev, 0,
-			    &g4x_wm_info, latency_ns,
-			    &g4x_cursor_wm_info, latency_ns,
-			    &planea_wm, &cursora_wm))
-		enabled |= 1;
-
-	if (g4x_compute_wm0(dev, 1,
-			    &g4x_wm_info, latency_ns,
-			    &g4x_cursor_wm_info, latency_ns,
-			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
-
-	plane_sr = cursor_sr = 0;
-	if (single_plane_enabled(enabled) &&
-	    g4x_compute_srwm(dev, ffs(enabled) - 1,
-			     sr_latency_ns,
-			     &g4x_wm_info,
-			     &g4x_cursor_wm_info,
-			     &plane_sr, &cursor_sr))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-	else
-		I915_WRITE(FW_BLC_SELF,
-			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
-		      planea_wm, cursora_wm,
-		      planeb_wm, cursorb_wm,
-		      plane_sr, cursor_sr);
-
-	I915_WRITE(DSPFW1,
-		   (plane_sr << DSPFW_SR_SHIFT) |
-		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
-		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
-		   planea_wm);
-	I915_WRITE(DSPFW2,
-		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
-		   (cursora_wm << DSPFW_CURSORA_SHIFT));
-	/* HPLL off in SR has some issues on G4x... disable it */
-	I915_WRITE(DSPFW3,
-		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
-		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i965_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	int srwm = 1;
-	int cursor_sr = 16;
-
-	/* Calc sr entries for one plane configs */
-	crtc = single_enabled_crtc(dev);
-	if (crtc) {
-		/* self-refresh has much higher latency */
-		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
-		int pixel_size = crtc->fb->bits_per_pixel / 8;
-		unsigned long line_time_us;
-		int entries;
-
-		line_time_us = ((htotal * 1000) / clock);
-
-		/* Use ns/us then divide to preserve precision */
-		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
-		entries = howmany(entries, I915_FIFO_LINE_SIZE);
-		srwm = I965_FIFO_SIZE - entries;
-		if (srwm < 0)
-			srwm = 1;
-		srwm &= 0x1ff;
-		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
-			      entries, srwm);
-
-		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * 64;
-		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
-		cursor_sr = i965_cursor_wm_info.fifo_size -
-			(entries + i965_cursor_wm_info.guard_size);
-
-		if (cursor_sr > i965_cursor_wm_info.max_wm)
-			cursor_sr = i965_cursor_wm_info.max_wm;
-
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-			      "cursor %d\n", srwm, cursor_sr);
-
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
-	}
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
-		      srwm);
-
-	/* 965 has limitations... */
-	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
-		   (8 << 16) | (8 << 8) | (8 << 0));
-	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
-	/* update cursor SR watermark */
-	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i9xx_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct intel_watermark_params *wm_info;
-	uint32_t fwater_lo;
-	uint32_t fwater_hi;
-	int cwm, srwm = 1;
-	int fifo_size;
-	int planea_wm, planeb_wm;
-	struct drm_crtc *crtc, *enabled = NULL;
-
-	if (IS_I945GM(dev))
-		wm_info = &i945_wm_info;
-	else if (!IS_GEN2(dev))
-		wm_info = &i915_wm_info;
-	else
-		wm_info = &i855_wm_info;
-
-	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
-	crtc = intel_get_crtc_for_plane(dev, 0);
-	if (crtc->enabled && crtc->fb) {
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
-					       wm_info, fifo_size,
-					       crtc->fb->bits_per_pixel / 8,
-					       latency_ns);
-		enabled = crtc;
-	} else
-		planea_wm = fifo_size - wm_info->guard_size;
-
-	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
-	crtc = intel_get_crtc_for_plane(dev, 1);
-	if (crtc->enabled && crtc->fb) {
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
-					       wm_info, fifo_size,
-					       crtc->fb->bits_per_pixel / 8,
-					       latency_ns);
-		if (enabled == NULL)
-			enabled = crtc;
-		else
-			enabled = NULL;
-	} else
-		planeb_wm = fifo_size - wm_info->guard_size;
-
-	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
-	/*
-	 * Overlay gets an aggressive default since video jitter is bad.
-	 */
-	cwm = 2;
-
-	/* Play safe and disable self-refresh before adjusting watermarks. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-
-	/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev) && enabled) {
-		/* self-refresh has much higher latency */
-		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
-		int pixel_size = enabled->fb->bits_per_pixel / 8;
-		unsigned long line_time_us;
-		int entries;
-
-		line_time_us = (htotal * 1000) / clock;
-
-		/* Use ns/us then divide to preserve precision */
-		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
-		entries = howmany(entries, wm_info->cacheline_size);
-		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
-		srwm = wm_info->fifo_size - entries;
-		if (srwm < 0)
-			srwm = 1;
-
-		if (IS_I945G(dev) || IS_I945GM(dev))
-			I915_WRITE(FW_BLC_SELF,
-				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-		else if (IS_I915GM(dev))
-			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
-	}
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-		      planea_wm, planeb_wm, cwm, srwm);
-
-	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
-	fwater_hi = (cwm & 0x1f);
-
-	/* Set request length to 8 cachelines per fetch */
-	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
-	fwater_hi = fwater_hi | (1 << 8);
-
-	I915_WRITE(FW_BLC, fwater_lo);
-	I915_WRITE(FW_BLC2, fwater_hi);
-
-	if (HAS_FW_BLC(dev)) {
-		if (enabled) {
-			if (IS_I945G(dev) || IS_I945GM(dev))
-				I915_WRITE(FW_BLC_SELF,
-					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
-			DRM_DEBUG_KMS("memory self refresh enabled\n");
-		} else
-			DRM_DEBUG_KMS("memory self refresh disabled\n");
-	}
-}
-
-static void i830_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	uint32_t fwater_lo;
-	int planea_wm;
-
-	crtc = single_enabled_crtc(dev);
-	if (crtc == NULL)
-		return;
-
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
-				       dev_priv->display.get_fifo_size(dev, 0),
-				       crtc->fb->bits_per_pixel / 8,
-				       latency_ns);
-	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
-	fwater_lo |= (3<<8) | planea_wm;
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
-	I915_WRITE(FW_BLC, fwater_lo);
-}
-
-#define ILK_LP0_PLANE_LATENCY		700
-#define ILK_LP0_CURSOR_LATENCY		1300
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
-				int fbc_wm, int display_wm, int cursor_wm,
-				const struct intel_watermark_params *display,
-				const struct intel_watermark_params *cursor)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-			      fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-		/* fbc has it's own way to disable FBC WM */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-		return false;
-	}
-
-	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
-		return false;
-	}
-
-	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-		return false;
-	}
-
-	if (!(fbc_wm || display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
-				  int latency_ns,
-				  const struct intel_watermark_params *display,
-				  const struct intel_watermark_params *cursor,
-				  int *fbc_wm, int *display_wm, int *cursor_wm)
-{
-	struct drm_crtc *crtc;
-	unsigned long line_time_us;
-	int hdisplay, htotal, pixel_size, clock;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*fbc_wm = *display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
-	pixel_size = crtc->fb->bits_per_pixel / 8;
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = howmany(min(small, large), display->cacheline_size);
-	*display_wm = entries + display->guard_size;
-
-	/*
-	 * Spec says:
-	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-	 */
-	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = howmany(entries, cursor->cacheline_size);
-	*cursor_wm = entries + cursor->guard_size;
-
-	return ironlake_check_srwm(dev, level,
-				   *fbc_wm, *display_wm, *cursor_wm,
-				   display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
-			    &ironlake_display_wm_info,
-			    ILK_LP0_PLANE_LATENCY,
-			    &ironlake_cursor_wm_info,
-			    ILK_LP0_CURSOR_LATENCY,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEA_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1;
-	}
-
-	if (g4x_compute_wm0(dev, 1,
-			    &ironlake_display_wm_info,
-			    ILK_LP0_PLANE_LATENCY,
-			    &ironlake_cursor_wm_info,
-			    ILK_LP0_CURSOR_LATENCY,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEB_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 2;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled))
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   ILK_READ_WM1_LATENCY() * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   ILK_READ_WM2_LATENCY() * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/*
-	 * WM3 is unsupported on ILK, probably because we don't have latency
-	 * data for that power state
-	 */
-}
-
-void sandybridge_update_wm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
-	u32 val;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEA_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1;
-	}
-
-	if (g4x_compute_wm0(dev, 1,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEB_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 2;
-	}
-
-	/* IVB has 3 pipes */
-	if (IS_IVYBRIDGE(dev) &&
-	    g4x_compute_wm0(dev, 2,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEC_IVB);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEC_IVB, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 3;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 *
-	 * SNB support 3 levels of watermark.
-	 *
-	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-	 * and disabled in the descending order
-	 *
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled) ||
-	    dev_priv->sprite_scaling_enabled)
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   SNB_READ_WM1_LATENCY() * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   SNB_READ_WM2_LATENCY() * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM3 */
-	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   SNB_READ_WM3_LATENCY() * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM3_LP_ILK,
-		   WM3_LP_EN |
-		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
-			      uint32_t sprite_width, int pixel_size,
-			      const struct intel_watermark_params *display,
-			      int display_latency_ns, int *sprite_wm)
-{
-	struct drm_crtc *crtc;
-	int clock;
-	int entries, tlb_miss;
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	if (crtc->fb == NULL || !crtc->enabled) {
-		*sprite_wm = display->guard_size;
-		return false;
-	}
-
-	clock = crtc->mode.clock;
-
-	/* Use the small buffer method to calculate the sprite watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
-	tlb_miss = display->fifo_size*display->cacheline_size -
-		sprite_width * 8;
-	if (tlb_miss > 0)
-		entries += tlb_miss;
-	entries = howmany(entries, display->cacheline_size);
-	*sprite_wm = entries + display->guard_size;
-	if (*sprite_wm > (int)display->max_wm)
-		*sprite_wm = display->max_wm;
-
-	return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
-				uint32_t sprite_width, int pixel_size,
-				const struct intel_watermark_params *display,
-				int latency_ns, int *sprite_wm)
-{
-	struct drm_crtc *crtc;
-	unsigned long line_time_us;
-	int clock;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
-	if (!clock) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	line_time_us = (sprite_width * 1000) / clock;
-	if (!line_time_us) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = sprite_width * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = howmany(min(small, large), display->cacheline_size);
-	*sprite_wm = entries + display->guard_size;
-
-	return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
-					 uint32_t sprite_width, int pixel_size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
-	u32 val;
-	int sprite_wm, reg;
-	int ret;
-
-	switch (pipe) {
-	case 0:
-		reg = WM0_PIPEA_ILK;
-		break;
-	case 1:
-		reg = WM0_PIPEB_ILK;
-		break;
-	case 2:
-		reg = WM0_PIPEC_IVB;
-		break;
-	default:
-		return; /* bad pipe */
-	}
-
-	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
-					    &sandybridge_display_wm_info,
-					    latency, &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
-			      pipe);
-		return;
-	}
-
-	val = I915_READ(reg);
-	val &= ~WM0_PIPE_SPRITE_MASK;
-	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
-	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM1_LATENCY() * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
-			      pipe);
-		return;
-	}
-	I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
-	/* Only IVB has two more LP watermarks for sprite */
-	if (!IS_IVYBRIDGE(dev))
-		return;
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM2_LATENCY() * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
-			      pipe);
-		return;
-	}
-	I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM3_LATENCY() * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
-			      pipe);
-		return;
-	}
-	I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- *   - normal (i.e. non-self-refresh)
- *   - self-refresh (SR) mode
- *   - lines are large relative to FIFO size (buffer can hold up to 2)
- *   - lines are small relative to FIFO size (buffer can hold more than 2
- *     lines), so need to account for TLB latency
- *
- *   The normal calculation is:
- *     watermark = dotclock * bytes per pixel * latency
- *   where latency is platform & configuration dependent (we assume pessimal
- *   values here).
- *
- *   The SR calculation is:
- *     watermark = (trunc(latency/line time)+1) * surface width *
- *       bytes per pixel
- *   where
- *     line time = htotal / dotclock
- *     surface width = hdisplay for normal plane and 64 for cursor
- *   and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that.  And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
-}
-
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
-				    uint32_t sprite_width, int pixel_size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
-						   pixel_size);
-}
-
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
 	if (i915_panel_use_ssc >= 0)
@@ -5187,6 +3771,222 @@
 	}
 }
 
+static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(LVDS);
+	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+	if (pipe == 1) {
+		temp |= LVDS_PIPEB_SELECT;
+	} else {
+		temp &= ~LVDS_PIPEB_SELECT;
+	}
+	/* set the corresponding LVDS_BORDER bit */
+	temp |= dev_priv->lvds_border_bits;
+	/* Set the B0-B3 data pairs corresponding to whether we're going to
+	 * set the DPLLs for dual-channel mode or not.
+	 */
+	if (clock->p2 == 7)
+		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+	else
+		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+	 * appropriately here, but we need to look more thoroughly into how
+	 * panels behave in the two modes.
+	 */
+	/* set the dithering flag on LVDS as needed */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (dev_priv->lvds_dither)
+			temp |= LVDS_ENABLE_DITHER;
+		else
+			temp &= ~LVDS_ENABLE_DITHER;
+	}
+	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		temp |= LVDS_HSYNC_POLARITY;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		temp |= LVDS_VSYNC_POLARITY;
+	I915_WRITE(LVDS, temp);
+}
+
+static void i9xx_update_pll(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode,
+			    intel_clock_t *clock, intel_clock_t *reduced_clock,
+			    int num_connectors)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 dpll;
+	bool is_sdvo;
+
+	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+	if (is_sdvo) {
+		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+		if (pixel_multiplier > 1) {
+			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+		}
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	}
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		dpll |= DPLL_DVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	if (IS_PINEVIEW(dev))
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+	else {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (IS_G4X(dev) && reduced_clock)
+			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	}
+	switch (clock->p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+	if (INTEL_INFO(dev)->gen >= 4)
+		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+		/* XXX: just matching BIOS for now */
+		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(DPLL(pipe));
+	DELAY(150);
+
+	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+	 * This is an exception to the general rule that mode_set doesn't turn
+	 * things on.
+	 */
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		intel_update_lvds(crtc, clock, adjusted_mode);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	DELAY(150);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		u32 temp = 0;
+		if (is_sdvo) {
+			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+			if (temp > 1)
+				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+			else
+				temp = 0;
+		}
+		I915_WRITE(DPLL_MD(pipe), temp);
+	} else {
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(DPLL(pipe), dpll);
+	}
+}
+
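+/*
+ * Gen2 (i8xx) variant: the divider encoding is simpler and there is no
+ * DPLL_MD register, so the pixel multiplier is applied by rewriting
+ * DPLL once the clocks have stabilized.
+ */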
+static void i8xx_update_pll(struct drm_crtc *crtc,
+			    struct drm_display_mode *adjusted_mode,
+			    intel_clock_t *clock,
+			    int num_connectors)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 dpll;
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	} else {
+		if (clock->p1 == 2)
+			dpll |= PLL_P1_DIVIDE_BY_TWO;
+		else
+			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (clock->p2 == 4)
+			dpll |= PLL_P2_DIVIDE_BY_4;
+	}
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+		/* XXX: just matching BIOS for now */
+		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(DPLL(pipe));
+	DELAY(150);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	DELAY(150);
+
+	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+	 * This is an exception to the general rule that mode_set doesn't turn
+	 * things on.
+	 */
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		intel_update_lvds(crtc, clock, adjusted_mode);
+
+	/* The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+	I915_WRITE(DPLL(pipe), dpll);
+}
+
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
@@ -5200,15 +4000,13 @@
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, dspcntr, pipeconf, vsyncshift;
-	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
-	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+	u32 dspcntr, pipeconf, vsyncshift;
+	bool ok, has_reduced_clock = false, is_sdvo = false;
+	bool is_lvds = false, is_tv = false, is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
-	u32 temp;
-	u32 lvds_sync = 0;
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 		if (encoder->base.crtc != crtc)
@@ -5224,15 +4022,9 @@
 			if (encoder->needs_tv_clock)
 				is_tv = true;
 			break;
-		case INTEL_OUTPUT_DVO:
-			is_dvo = true;
-			break;
 		case INTEL_OUTPUT_TVOUT:
 			is_tv = true;
 			break;
-		case INTEL_OUTPUT_ANALOG:
-			is_crt = true;
-			break;
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
 			break;
@@ -5279,71 +4071,12 @@
 	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
 				 &reduced_clock : NULL);
 
-	dpll = DPLL_VGA_MODE_DIS;
-
-	if (!IS_GEN2(dev)) {
-		if (is_lvds)
-			dpll |= DPLLB_MODE_LVDS;
-		else
-			dpll |= DPLLB_MODE_DAC_SERIAL;
-		if (is_sdvo) {
-			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-			if (pixel_multiplier > 1) {
-				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-			}
-			dpll |= DPLL_DVO_HIGH_SPEED;
-		}
-		if (is_dp)
-			dpll |= DPLL_DVO_HIGH_SPEED;
-
-		/* compute bitmask from p1 value */
-		if (IS_PINEVIEW(dev))
-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
-		else {
-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-			if (IS_G4X(dev) && has_reduced_clock)
-				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-		}
-		switch (clock.p2) {
-		case 5:
-			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-			break;
-		case 7:
-			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-			break;
-		case 10:
-			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-			break;
-		case 14:
-			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-			break;
-		}
-		if (INTEL_INFO(dev)->gen >= 4)
-			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-	} else {
-		if (is_lvds) {
-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		} else {
-			if (clock.p1 == 2)
-				dpll |= PLL_P1_DIVIDE_BY_TWO;
-			else
-				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-			if (clock.p2 == 4)
-				dpll |= PLL_P2_DIVIDE_BY_4;
-		}
-	}
-
-	if (is_sdvo && is_tv)
-		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (is_tv)
-		/* XXX: just matching BIOS for now */
-		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
-		dpll |= 3;
-	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
-		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	if (IS_GEN2(dev))
+		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
 	else
-		dpll |= PLL_REF_INPUT_DREFCLK;
+		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors);
 
 	/* setup pipeconf */
 	pipeconf = I915_READ(PIPECONF(pipe));
@@ -5380,97 +4113,9 @@
 		}
 	}
 
-	dpll |= DPLL_VCO_ENABLE;
-
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
-	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
-	POSTING_READ(DPLL(pipe));
-	DELAY(150);
-
-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
-	 * This is an exception to the general rule that mode_set doesn't turn
-	 * things on.
-	 */
-	if (is_lvds) {
-		temp = I915_READ(LVDS);
-		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-		if (pipe == 1) {
-			temp |= LVDS_PIPEB_SELECT;
-		} else {
-			temp &= ~LVDS_PIPEB_SELECT;
-		}
-		/* set the corresponding LVDS_BORDER bit */
-		temp |= dev_priv->lvds_border_bits;
-		/* Set the B0-B3 data pairs corresponding to whether we're going to
-		 * set the DPLLs for dual-channel mode or not.
-		 */
-		if (clock.p2 == 7)
-			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-		else
-			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
-		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-		 * appropriately here, but we need to look more thoroughly into how
-		 * panels behave in the two modes.
-		 */
-		/* set the dithering flag on LVDS as needed */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			if (dev_priv->lvds_dither)
-				temp |= LVDS_ENABLE_DITHER;
-			else
-				temp &= ~LVDS_ENABLE_DITHER;
-		}
-		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-			lvds_sync |= LVDS_HSYNC_POLARITY;
-		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-			lvds_sync |= LVDS_VSYNC_POLARITY;
-		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
-		    != lvds_sync) {
-			char flags[2] = "-+";
-			DRM_INFO("Changing LVDS panel from "
-				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
-				 flags[!(temp & LVDS_HSYNC_POLARITY)],
-				 flags[!(temp & LVDS_VSYNC_POLARITY)],
-				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
-				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
-			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-			temp |= lvds_sync;
-		}
-		I915_WRITE(LVDS, temp);
-	}
-
-	if (is_dp) {
-		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	}
-
-	I915_WRITE(DPLL(pipe), dpll);
-
-	/* Wait for the clocks to stabilize. */
-	POSTING_READ(DPLL(pipe));
-	DELAY(150);
-
-	if (INTEL_INFO(dev)->gen >= 4) {
-		temp = 0;
-		if (is_sdvo) {
-			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
-			if (temp > 1)
-				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-			else
-				temp = 0;
-		}
-		I915_WRITE(DPLL_MD(pipe), temp);
-	} else {
-		/* The pixel multiplier can only be updated once the
-		 * DPLL is enabled and the clocks are stable.
-		 *
-		 * So write it again.
-		 */
-		I915_WRITE(DPLL(pipe), dpll);
-	}
-
 	if (HAS_PIPE_CXSR(dev)) {
 		if (intel_crtc->lowfreq_avail) {
 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5536,7 +4181,6 @@
 
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
-	intel_enable_plane(dev_priv, plane, pipe);
 
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -5712,17 +4356,16 @@
 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	struct intel_encoder *has_edp_encoder = NULL;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct intel_encoder *encoder;
+	struct intel_encoder *encoder, *edp_encoder = NULL;
 	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 temp;
-	u32 lvds_sync = 0;
 	int target_clock, pixel_multiplier, lane, link_bw, factor;
 	unsigned int pipe_bpp;
 	bool dither;
+	bool is_cpu_edp = false, is_pch_edp = false;
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 		if (encoder->base.crtc != crtc)
@@ -5748,7 +4391,12 @@
 			is_dp = true;
 			break;
 		case INTEL_OUTPUT_EDP:
-			has_edp_encoder = encoder;
+			is_dp = true;
+			if (intel_encoder_is_pch_edp(&encoder->base))
+				is_pch_edp = true;
+			else
+				is_cpu_edp = true;
+			edp_encoder = encoder;
 			break;
 		}
 
@@ -5811,15 +4459,13 @@
 	lane = 0;
 	/* CPU eDP doesn't require FDI link, so just set DP M/N
 	   according to current link config */
-	if (has_edp_encoder &&
-	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+	if (is_cpu_edp) {
 		target_clock = mode->clock;
-		intel_edp_link_config(has_edp_encoder,
-				      &lane, &link_bw);
+		intel_edp_link_config(edp_encoder, &lane, &link_bw);
 	} else {
 		/* [e]DP over FDI requires target mode clock
 		   instead of link clock */
-		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+		if (is_dp)
 			target_clock = mode->clock;
 		else
 			target_clock = adjusted_mode->clock;
@@ -5910,7 +4556,7 @@
 		}
 		dpll |= DPLL_DVO_HIGH_SPEED;
 	}
-	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+	if (is_dp && !is_cpu_edp)
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
@@ -5949,34 +4595,25 @@
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
-	/* PCH eDP needs FDI, but CPU eDP does not */
-	if (!intel_crtc->no_pll) {
-		if (!has_edp_encoder ||
-		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-			I915_WRITE(PCH_FP0(pipe), fp);
-			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own
+	 * on pre-Haswell/LPT generations */
+	if (HAS_PCH_LPT(dev)) {
+		DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+				pipe);
+	} else if (!is_cpu_edp) {
+		struct intel_pch_pll *pll;
 
-			POSTING_READ(PCH_DPLL(pipe));
-			DELAY(150);
-		}
-	} else {
-		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
-		    fp == I915_READ(PCH_FP0(0))) {
-			intel_crtc->use_pll_a = true;
-			DRM_DEBUG_KMS("using pipe a dpll\n");
-		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
-			   fp == I915_READ(PCH_FP0(1))) {
-			intel_crtc->use_pll_a = false;
-			DRM_DEBUG_KMS("using pipe b dpll\n");
-		} else {
-			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
-			return -EINVAL;
-		}
-	}
+		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+					 pipe);
+			return -EINVAL;
+		}
+	} else
+		intel_put_pch_pll(intel_crtc);
 
 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
 	 * This is an exception to the general rule that mode_set doesn't turn
@@ -6009,22 +4646,11 @@
 		 * appropriately here, but we need to look more thoroughly into how
 		 * panels behave in the two modes.
 		 */
+		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-			lvds_sync |= LVDS_HSYNC_POLARITY;
+			temp |= LVDS_HSYNC_POLARITY;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-			lvds_sync |= LVDS_VSYNC_POLARITY;
-		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
-		    != lvds_sync) {
-			char flags[2] = "-+";
-			DRM_INFO("Changing LVDS panel from "
-				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
-				 flags[!(temp & LVDS_HSYNC_POLARITY)],
-				 flags[!(temp & LVDS_VSYNC_POLARITY)],
-				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
-				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
-			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-			temp |= lvds_sync;
-		}
+			temp |= LVDS_VSYNC_POLARITY;
 		I915_WRITE(PCH_LVDS, temp);
 	}
 
@@ -6034,7 +4660,7 @@
 		pipeconf |= PIPECONF_DITHER_EN;
 		pipeconf |= PIPECONF_DITHER_TYPE_SP;
 	}
-	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+	if (is_dp && !is_cpu_edp) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6044,13 +4670,11 @@
 		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 	}
 
-	if (!intel_crtc->no_pll &&
-	    (!has_edp_encoder ||
-	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
-		I915_WRITE(PCH_DPLL(pipe), dpll);
+	if (intel_crtc->pch_pll) {
+		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
 		/* Wait for the clocks to stabilize. */
-		POSTING_READ(PCH_DPLL(pipe));
+		POSTING_READ(intel_crtc->pch_pll->pll_reg);
 		DELAY(150);
 
 		/* The pixel multiplier can only be updated once the
@@ -6058,13 +4682,13 @@
 		 *
 		 * So write it again.
 		 */
-		I915_WRITE(PCH_DPLL(pipe), dpll);
+		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 	}
 
 	intel_crtc->lowfreq_avail = false;
-	if (!intel_crtc->no_pll) {
+	if (intel_crtc->pch_pll) {
 		if (is_lvds && has_reduced_clock && i915_powersave) {
-			I915_WRITE(PCH_FP1(pipe), fp2);
+			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
 			intel_crtc->lowfreq_avail = true;
 			if (HAS_PIPE_CXSR(dev)) {
 				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -6071,7 +4695,7 @@
 				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 			}
 		} else {
-			I915_WRITE(PCH_FP1(pipe), fp);
+			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
 			if (HAS_PIPE_CXSR(dev)) {
 				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6124,10 +4748,8 @@
 	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 
-	if (has_edp_encoder &&
-	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-	}
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
@@ -6141,6 +4763,8 @@
 
 	intel_update_watermarks(dev);
 
+	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
 	return ret;
 }
 
@@ -6499,7 +5123,7 @@
 	if (!visible && !intel_crtc->cursor_visible)
 		return;
 
-	if (IS_IVYBRIDGE(dev)) {
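+	/* Haswell shares the Ivybridge cursor register layout (CURPOS_IVB) */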
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		I915_WRITE(CURPOS_IVB(pipe), pos);
 		ivb_update_cursor(crtc, base);
 	} else {
@@ -6509,9 +5133,6 @@
 		else
 			i9xx_update_cursor(crtc, base);
 	}
-
-	if (visible)
-		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
 }
 
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6596,7 +5217,7 @@
 			if (intel_crtc->cursor_bo != obj)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
-			i915_gem_object_unpin(intel_crtc->cursor_bo);
+			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
@@ -6611,7 +5232,7 @@
 
 	return 0;
 fail_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 fail_locked:
 	DRM_UNLOCK(dev);
 fail:
@@ -7039,7 +5660,6 @@
 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
 
 	drm_mode_set_name(mode);
-	drm_mode_set_crtcinfo(mode, 0);
 
 	return mode;
 }
@@ -7206,12 +5826,16 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy)
+	if (!dev_priv->busy) {
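+		/* Re-sanitize power management state on the idle -> busy
+		 * transition */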
+		intel_sanitize_pm(dev);
 		dev_priv->busy = true;
-	else
+	} else
 		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
 		    intel_gpu_idle_timer, dev);
 
+	if (obj == NULL)
+		return;
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
 			continue;
@@ -7333,7 +5957,7 @@
 	obj = work->old_fb_obj;
 
 	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
-	if (atomic_read(&obj->pending_flip) == 0)
+	if (atomic_load_acq_int(&obj->pending_flip) == 0)
 		wakeup(&obj->pending_flip);
 	mtx_unlock(&dev->event_lock);
 
@@ -7384,18 +6008,19 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long offset;
 	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
-		goto out;
+		goto err;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -7404,15 +6029,19 @@
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset + offset);
-	OUT_RING(0); /* aux display base address, unused */
-	ADVANCE_LP_RING();
-out:
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	intel_ring_advance(ring);
+	return 0;
+
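+	/* A failure after pinning must release the scanout pin, or the
+	 * framebuffer object would leak a pin reference. */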
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
 	return ret;
 }
 
@@ -7425,33 +6054,38 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long offset;
 	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
-		goto out;
+		goto err;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_DISPLAY_FLIP_I915 |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset + offset);
-	OUT_RING(MI_NOOP);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + offset);
+	intel_ring_emit(ring, MI_NOOP);
 
-	ADVANCE_LP_RING();
-out:
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
 	return ret;
 }
 
@@ -7463,24 +6097,25 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
-		goto out;
+		goto err;
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0]);
-	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -7488,9 +6123,13 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	OUT_RING(pf | pipesrc);
-	ADVANCE_LP_RING();
-out:
+	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
 	return ret;
 }
 
@@ -7501,21 +6140,22 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
-		goto out;
+		goto err;
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
-	OUT_RING(MI_DISPLAY_FLIP |
-		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	OUT_RING(fb->pitches[0] | obj->tiling_mode);
-	OUT_RING(obj->gtt_offset);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, obj->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -7525,9 +6165,13 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	OUT_RING(pf | pipesrc);
-	ADVANCE_LP_RING();
-out:
+	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
 	return ret;
 }
 
@@ -7549,11 +6193,11 @@
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
-		goto out;
+		goto err;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
@@ -7560,7 +6204,11 @@
 	intel_ring_emit(ring, (obj->gtt_offset));
 	intel_ring_emit(ring, (MI_NOOP));
 	intel_ring_advance(ring);
-out:
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
 	return ret;
 }
 
@@ -7633,6 +6281,7 @@
 	if (ret)
 		goto cleanup_pending;
 	intel_disable_fbc(dev);
+	intel_mark_busy(dev, obj);
 	DRM_UNLOCK(dev);
 
 	CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
@@ -7640,7 +6289,7 @@
 	return 0;
 
 cleanup_pending:
-	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+	atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	DRM_UNLOCK(dev);
@@ -7661,10 +6310,11 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg, val;
+	int i;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
-	for_each_pipe(pipe) {
-		reg = PIPECONF(pipe);
+	for_each_pipe(i) {
+		reg = PIPECONF(i);
 		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 	}
 
@@ -7734,6 +6384,23 @@
 	.page_flip = intel_crtc_page_flip,
 };
 
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	if (dev_priv->num_pch_pll == 0) {
+		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+		return;
+	}
+
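+	/* Record each shared PCH PLL's DPLL/FP0/FP1 register offsets so
+	 * intel_get_pch_pll() can program them later. */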
+	for (i = 0; i < dev_priv->num_pch_pll; i++) {
+		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+	}
+}
+
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7772,8 +6439,6 @@
 	intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
 	if (HAS_PCH_SPLIT(dev)) {
-		if (pipe == 2 && IS_IVYBRIDGE(dev))
-			intel_crtc->no_pll = true;
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
 		intel_helper_funcs.commit = ironlake_crtc_commit;
 	} else {
@@ -7785,21 +6450,18 @@
 
 	intel_crtc->busy = false;
 
-	callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
+	callout_init(&intel_crtc->idle_callout, 1);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
 
 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
 			DRM_MODE_OBJECT_CRTC);
@@ -7872,9 +6534,28 @@
 
 	intel_crt_init(dev);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_HASWELL(dev)) {
 		int found;
 
+		/* Haswell uses DDI functions to detect digital outputs */
+		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+		/* DDI A only supports eDP */
+		if (found)
+			intel_ddi_init(dev, PORT_A);
+
+		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
+		 * register */
+		found = I915_READ(SFUSE_STRAP);
+
+		if (found & SFUSE_STRAP_DDIB_DETECTED)
+			intel_ddi_init(dev, PORT_B);
+		if (found & SFUSE_STRAP_DDIC_DETECTED)
+			intel_ddi_init(dev, PORT_C);
+		if (found & SFUSE_STRAP_DDID_DETECTED)
+			intel_ddi_init(dev, PORT_D);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		int found;
+
 		DRM_DEBUG_KMS(
 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
 		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
@@ -7887,7 +6568,7 @@
 
 		if (I915_READ(HDMIB) & PORT_DETECTED) {
 			/* PCH SDVOB multiplex with HDMIB */
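+			/* the new third argument distinguishes SDVOB (true)
+			 * from SDVOC (false) */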
-			found = intel_sdvo_init(dev, PCH_SDVOB);
+			found = intel_sdvo_init(dev, PCH_SDVOB, true);
 			if (!found)
 				intel_hdmi_init(dev, HDMIB);
 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7911,7 +6592,7 @@
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOB\n");
-			found = intel_sdvo_init(dev, SDVOB);
+			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, SDVOB);
@@ -7927,7 +6608,7 @@
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOC\n");
-			found = intel_sdvo_init(dev, SDVOC);
+			found = intel_sdvo_init(dev, SDVOC, false);
 		}
 
 		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8061,905 +6742,6 @@
 	.output_poll_changed = intel_fb_output_poll_changed,
 };
 
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
-	struct drm_i915_gem_object *ctx;
-	int ret;
-
-	DRM_LOCK_ASSERT(dev);
-
-	ctx = i915_gem_alloc_object(dev, 4096);
-	if (!ctx) {
-		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
-		return NULL;
-	}
-
-	ret = i915_gem_object_pin(ctx, 4096, true);
-	if (ret) {
-		DRM_ERROR("failed to pin power context: %d\n", ret);
-		goto err_unref;
-	}
-
-	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
-	if (ret) {
-		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
-		goto err_unpin;
-	}
-
-	return ctx;
-
-err_unpin:
-	i915_gem_object_unpin(ctx);
-err_unref:
-	drm_gem_object_unreference(&ctx->base);
-	DRM_UNLOCK(dev);
-	return NULL;
-}
-
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 rgvswctl;
-
-	rgvswctl = I915_READ16(MEMSWCTL);
-	if (rgvswctl & MEMCTL_CMD_STS) {
-		DRM_DEBUG("gpu busy, RCS change rejected\n");
-		return false; /* still busy with another command */
-	}
-
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	I915_WRITE16(MEMSWCTL, rgvswctl);
-	POSTING_READ16(MEMSWCTL);
-
-	rgvswctl |= MEMCTL_CMD_STS;
-	I915_WRITE16(MEMSWCTL, rgvswctl);
-
-	return true;
-}
-
-void ironlake_enable_drps(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u8 fmax, fmin, fstart, vstart;
-
-	/* Enable temp reporting */
-	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
-	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-
-	/* 100ms RC evaluation intervals */
-	I915_WRITE(RCUPEI, 100000);
-	I915_WRITE(RCDNEI, 100000);
-
-	/* Set max/min thresholds to 90ms and 80ms respectively */
-	I915_WRITE(RCBMAXAVG, 90000);
-	I915_WRITE(RCBMINAVG, 80000);
-
-	I915_WRITE(MEMIHYST, 1);
-
-	/* Set up min, max, and cur for interrupt handling */
-	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
-	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
-	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
-		MEMMODE_FSTART_SHIFT;
-
-	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
-		PXVFREQ_PX_SHIFT;
-
-	dev_priv->fmax = fmax; /* IPS callback will increase this */
-	dev_priv->fstart = fstart;
-
-	dev_priv->max_delay = fstart;
-	dev_priv->min_delay = fmin;
-	dev_priv->cur_delay = fstart;
-
-	DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
-			 fmax, fmin, fstart);
-
-	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
-	/*
-	 * Interrupts will be enabled in ironlake_irq_postinstall
-	 */
-
-	I915_WRITE(VIDSTART, vstart);
-	POSTING_READ(VIDSTART);
-
-	rgvmodectl |= MEMMODE_SWMODE_EN;
-	I915_WRITE(MEMMODECTL, rgvmodectl);
-
-	if (_intel_wait_for(dev,
-	    (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
-	    1, "915per"))
-		DRM_ERROR("stuck trying to change perf mode\n");
-	pause("915dsp", 1);
-
-	ironlake_set_drps(dev, fstart);
-
-	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
-		I915_READ(0x112e0);
-	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->last_count2 = I915_READ(0x112f4);
-	nanotime(&dev_priv->last_time2);
-}
-
-void ironlake_disable_drps(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
-
-	/* Ack interrupts, disable EFC interrupt */
-	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
-	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
-	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
-	I915_WRITE(DEIIR, DE_PCU_EVENT);
-	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
-
-	/* Go back to the starting frequency */
-	ironlake_set_drps(dev, dev_priv->fstart);
-	pause("915dsp", 1);
-	rgvswctl |= MEMCTL_CMD_STS;
-	I915_WRITE(MEMSWCTL, rgvswctl);
-	pause("915dsp", 1);
-
-}
-
-void gen6_set_rps(struct drm_device *dev, u8 val)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 swreq;
-
-	swreq = (val & 0x3ff) << 25;
-	I915_WRITE(GEN6_RPNSWREQ, swreq);
-}
-
-void gen6_disable_rps(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-	I915_WRITE(GEN6_PMIER, 0);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
-	mtx_lock(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	mtx_unlock(&dev_priv->rps_lock);
-
-	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
-	unsigned long freq;
-	int div = (vidfreq & 0x3f0000) >> 16;
-	int post = (vidfreq & 0x3000) >> 12;
-	int pre = (vidfreq & 0x7);
-
-	if (!pre)
-		return 0;
-
-	freq = ((div * 133333) / ((1<<post) * pre));
-
-	return freq;
-}
-
-void intel_init_emon(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 lcfuse;
-	u8 pxw[16];
-	int i;
-
-	/* Disable to program */
-	I915_WRITE(ECR, 0);
-	POSTING_READ(ECR);
-
-	/* Program energy weights for various events */
-	I915_WRITE(SDEW, 0x15040d00);
-	I915_WRITE(CSIEW0, 0x007f0000);
-	I915_WRITE(CSIEW1, 0x1e220004);
-	I915_WRITE(CSIEW2, 0x04000004);
-
-	for (i = 0; i < 5; i++)
-		I915_WRITE(PEW + (i * 4), 0);
-	for (i = 0; i < 3; i++)
-		I915_WRITE(DEW + (i * 4), 0);
-
-	/* Program P-state weights to account for frequency power adjustment */
-	for (i = 0; i < 16; i++) {
-		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
-		unsigned long freq = intel_pxfreq(pxvidfreq);
-		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
-			PXVFREQ_PX_SHIFT;
-		unsigned long val;
-
-		val = vid * vid;
-		val *= (freq / 1000);
-		val *= 255;
-		val /= (127*127*900);
-		if (val > 0xff)
-			DRM_ERROR("bad pxval: %ld\n", val);
-		pxw[i] = val;
-	}
-	/* Render standby states get 0 weight */
-	pxw[14] = 0;
-	pxw[15] = 0;
-
-	for (i = 0; i < 4; i++) {
-		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
-			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-		I915_WRITE(PXW + (i * 4), val);
-	}
-
-	/* Adjust magic regs to magic values (more experimental results) */
-	I915_WRITE(OGW0, 0);
-	I915_WRITE(OGW1, 0);
-	I915_WRITE(EG0, 0x00007f00);
-	I915_WRITE(EG1, 0x0000000e);
-	I915_WRITE(EG2, 0x000e0000);
-	I915_WRITE(EG3, 0x68000300);
-	I915_WRITE(EG4, 0x42000000);
-	I915_WRITE(EG5, 0x00140031);
-	I915_WRITE(EG6, 0);
-	I915_WRITE(EG7, 0);
-
-	for (i = 0; i < 8; i++)
-		I915_WRITE(PXWL + (i * 4), 0);
-
-	/* Enable PMON + select events */
-	I915_WRITE(ECR, 0x80000019);
-
-	lcfuse = I915_READ(LCFUSE02);
-
-	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static int intel_enable_rc6(struct drm_device *dev)
-{
-	/*
-	 * Respect the kernel parameter if it is set
-	 */
-	if (i915_enable_rc6 >= 0)
-		return i915_enable_rc6;
-
-	/*
-	 * Disable RC6 on Ironlake
-	 */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
-	/*
-	 * Enable rc6 on Sandybridge if DMA remapping is disabled
-	 */
-	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER(
-		    "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
-		     intel_iommu_enabled ? "true" : "false",
-		     !intel_iommu_enabled ? "en" : "dis");
-		return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
-	}
-	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
-	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-}
-
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-	u32 pcu_mbox, rc6_mask = 0;
-	u32 gtfifodbg;
-	int cur_freq, min_freq, max_freq;
-	int rc6_mode;
-	int i;
-
-	/* Here begins a magic sequence of register writes to enable
-	 * auto-downclocking.
-	 *
-	 * Perhaps there might be some value in exposing these to
-	 * userspace...
-	 */
-	I915_WRITE(GEN6_RC_STATE, 0);
-	DRM_LOCK(dev);
-
-	/* Clear the DBG now so we don't confuse earlier errors */
-	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
-		I915_WRITE(GTFIFODBG, gtfifodbg);
-	}
-
-	gen6_gt_force_wake_get(dev_priv);
-
-	/* disable the counters and set deterministic thresholds */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
-	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
-
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
-	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
-	rc6_mode = intel_enable_rc6(dev_priv->dev);
-	if (rc6_mode & INTEL_RC6_ENABLE)
-		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
-	if (rc6_mode & INTEL_RC6p_ENABLE)
-		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
-	if (rc6_mode & INTEL_RC6pp_ENABLE)
-		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-
-	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
-			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
-			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
-
-	I915_WRITE(GEN6_RC_CONTROL,
-		   rc6_mask |
-		   GEN6_RC_CTL_EI_MODE(1) |
-		   GEN6_RC_CTL_HW_ENABLE);
-
-	I915_WRITE(GEN6_RPNSWREQ,
-		   GEN6_FREQUENCY(10) |
-		   GEN6_OFFSET(0) |
-		   GEN6_AGGRESSIVE_TURBO);
-	I915_WRITE(GEN6_RC_VIDEO_FREQ,
-		   GEN6_FREQUENCY(12));
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   18 << 24 |
-		   6 << 16);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-	I915_WRITE(GEN6_RP_UP_EI, 100000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_CONT);
-
-	if (_intel_wait_for(dev,
-	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
-	    1, "915pr1"))
-		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
-	I915_WRITE(GEN6_PCODE_DATA, 0);
-	I915_WRITE(GEN6_PCODE_MAILBOX,
-		   GEN6_PCODE_READY |
-		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-	if (_intel_wait_for(dev,
-	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
-	    1, "915pr2"))
-		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
-	min_freq = (rp_state_cap & 0xff0000) >> 16;
-	max_freq = rp_state_cap & 0xff;
-	cur_freq = (gt_perf_status & 0xff00) >> 8;
-
-	/* Check for overclock support */
-	if (_intel_wait_for(dev,
-	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
-	    1, "915pr3"))
-		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
-	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
-	if (_intel_wait_for(dev,
-	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
-	    1, "915pr4"))
-		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-	if (pcu_mbox & (1<<31)) { /* OC supported */
-		max_freq = pcu_mbox & 0xff;
-		DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
-	}
-
-	/* In units of 100MHz */
-	dev_priv->max_delay = max_freq;
-	dev_priv->min_delay = min_freq;
-	dev_priv->cur_delay = cur_freq;
-
-	/* requires MSI enabled */
-	I915_WRITE(GEN6_PMIER,
-		   GEN6_PM_MBOX_EVENT |
-		   GEN6_PM_THERMAL_EVENT |
-		   GEN6_PM_RP_DOWN_TIMEOUT |
-		   GEN6_PM_RP_UP_THRESHOLD |
-		   GEN6_PM_RP_DOWN_THRESHOLD |
-		   GEN6_PM_RP_UP_EI_EXPIRED |
-		   GEN6_PM_RP_DOWN_EI_EXPIRED);
-	mtx_lock(&dev_priv->rps_lock);
-	if (dev_priv->pm_iir != 0)
-		printf("pm_iir %x\n", dev_priv->pm_iir);
-	I915_WRITE(GEN6_PMIMR, 0);
-	mtx_unlock(&dev_priv->rps_lock);
-	/* enable all PM interrupts */
-	I915_WRITE(GEN6_PMINTRMSK, 0);
-
-	gen6_gt_force_wake_put(dev_priv);
-	DRM_UNLOCK(dev);
-}
-
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev;
-	int min_freq = 15;
-	int gpu_freq, ia_freq, max_ia_freq;
-	int scaling_factor = 180;
-	uint64_t tsc_freq;
-
-	dev = dev_priv->dev;
-#if 0
-	max_ia_freq = cpufreq_quick_get_max(0);
-	/*
-	 * Default to measured freq if none found, PCU will ensure we don't go
-	 * over
-	 */
-	if (!max_ia_freq)
-		max_ia_freq = tsc_freq;
-
-	/* Convert from Hz to MHz */
-	max_ia_freq /= 1000;
-#else
-	tsc_freq = atomic_load_acq_64(&tsc_freq);
-	max_ia_freq = tsc_freq / 1000 / 1000;
-#endif
-
-	DRM_LOCK(dev);
-
-	/*
-	 * For each potential GPU frequency, load a ring frequency we'd like
-	 * to use for memory access.  We do this by specifying the IA frequency
-	 * the PCU should use as a reference to determine the ring frequency.
-	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
-	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
-		int d;
-
-		/*
-		 * For GPU frequencies less than 750MHz, just use the lowest
-		 * ring freq.
-		 */
-		if (gpu_freq < min_freq)
-			ia_freq = 800;
-		else
-			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
-		d = 100;
-		ia_freq = (ia_freq + d / 2) / d;
-
-		I915_WRITE(GEN6_PCODE_DATA,
-			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
-			   gpu_freq);
-		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-		if (_intel_wait_for(dev,
-		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		    10, 1, "915frq")) {
-			DRM_ERROR("pcode write of freq table timed out\n");
-			continue;
-		}
-	}
-
-	DRM_UNLOCK(dev);
-}
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-	/* Required for FBC */
-	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
-		DPFCRUNIT_CLOCK_GATE_DISABLE |
-		DPFDUNIT_CLOCK_GATE_DISABLE;
-	/* Required for CxSR */
-	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(PCH_3DCGDIS0,
-		   MARIUNIT_CLOCK_GATE_DISABLE |
-		   SVSMUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(PCH_3DCGDIS1,
-		   VFMUNIT_CLOCK_GATE_DISABLE);
-
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
-	/*
-	 * According to the spec the following bits should be set in
-	 * order to enable memory self-refresh
-	 * The bit 22/21 of 0x42004
-	 * The bit 5 of 0x42020
-	 * The bit 15 of 0x45000
-	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
-		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-	I915_WRITE(ILK_DSPCLK_GATE,
-		   (I915_READ(ILK_DSPCLK_GATE) |
-		    ILK_DPARB_CLK_GATE));
-	I915_WRITE(DISP_ARB_CTL,
-		   (I915_READ(DISP_ARB_CTL) |
-		    DISP_FBC_WM_DIS));
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	/*
-	 * Based on the document from hardware guys the following bits
-	 * should be set unconditionally in order to enable FBC.
-	 * The bit 22 of 0x42000
-	 * The bit 22 of 0x42004
-	 * The bit 7,8,9 of 0x42020.
-	 */
-	if (IS_IRONLAKE_M(dev)) {
-		I915_WRITE(ILK_DISPLAY_CHICKEN1,
-			   I915_READ(ILK_DISPLAY_CHICKEN1) |
-			   ILK_FBCQ_DIS);
-		I915_WRITE(ILK_DISPLAY_CHICKEN2,
-			   I915_READ(ILK_DISPLAY_CHICKEN2) |
-			   ILK_DPARB_GATE);
-		I915_WRITE(ILK_DSPCLK_GATE,
-			   I915_READ(ILK_DSPCLK_GATE) |
-			   ILK_DPFC_DIS1 |
-			   ILK_DPFC_DIS2 |
-			   ILK_CLK_FBC);
-	}
-
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-	I915_WRITE(_3D_CHICKEN2,
-		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
-		   _3D_CHICKEN2_WM_READ_PIPELINED);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	I915_WRITE(GEN6_UCGCTL1,
-		   I915_READ(GEN6_UCGCTL1) |
-		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
-
-	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-	 * gating disable must be set.  Failure to set it results in
-	 * flickering pixels due to Z write ordering failures after
-	 * some amount of runtime in the Mesa "fire" demo, and Unigine
-	 * Sanctuary and Tropics, and apparently anything else with
-	 * alpha test or pixel discard.
-	 *
-	 * According to the spec, bit 11 (RCCUNIT) must also be set,
-	 * but we didn't debug actual testcases to find it out.
-	 */
-	I915_WRITE(GEN6_UCGCTL2,
-		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
-		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * According to the spec the following bits should be
-	 * set in order to enable memory self-refresh and fbc:
-	 * The bit21 and bit22 of 0x42000
-	 * The bit21 and bit22 of 0x42004
-	 * The bit5 and bit7 of 0x42020
-	 * The bit14 of 0x70180
-	 * The bit14 of 0x71180
-	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN1,
-		   I915_READ(ILK_DISPLAY_CHICKEN1) |
-		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-	I915_WRITE(ILK_DSPCLK_GATE,
-		   I915_READ(ILK_DSPCLK_GATE) |
-		   ILK_DPARB_CLK_GATE  |
-		   ILK_DPFD_CLK_GATE);
-
-	for_each_pipe(pipe) {
-		I915_WRITE(DSPCNTR(pipe),
-			   I915_READ(DSPCNTR(pipe)) |
-			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		intel_flush_display_plane(dev_priv, pipe);
-	}
-}
-
-static void ivybridge_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating workaround.
-	 */
-	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
-	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
-	I915_WRITE(IVB_CHICKEN3,
-		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
-	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
-	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
-		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
-	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
-	I915_WRITE(GEN7_L3CNTLREG1,
-			GEN7_WA_FOR_GEN7_L3_CONTROL);
-	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-			GEN7_WA_L3_CHICKEN_MODE);
-
-	/* This is required by WaCatErrorRejectionIssue */
-	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-	for_each_pipe(pipe) {
-		I915_WRITE(DSPCNTR(pipe),
-			   I915_READ(DSPCNTR(pipe)) |
-			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		intel_flush_display_plane(dev_priv, pipe);
-	}
-}
-
-static void g4x_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dspclk_gate;
-
-	I915_WRITE(RENCLK_GATE_D1, 0);
-	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
-		   GS_UNIT_CLOCK_GATE_DISABLE |
-		   CL_UNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(RAMCLK_GATE_D, 0);
-	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
-		OVRUNIT_CLOCK_GATE_DISABLE |
-		OVCUNIT_CLOCK_GATE_DISABLE;
-	if (IS_GM45(dev))
-		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-}
-
-static void crestline_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
-	I915_WRITE(RENCLK_GATE_D2, 0);
-	I915_WRITE(DSPCLK_GATE_D, 0);
-	I915_WRITE(RAMCLK_GATE_D, 0);
-	I915_WRITE16(DEUC, 0);
-}
-
-static void broadwater_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
-		   I965_RCC_CLOCK_GATE_DISABLE |
-		   I965_RCPB_CLOCK_GATE_DISABLE |
-		   I965_ISC_CLOCK_GATE_DISABLE |
-		   I965_FBC_CLOCK_GATE_DISABLE);
-	I915_WRITE(RENCLK_GATE_D2, 0);
-}
-
-static void gen3_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dstate = I915_READ(D_STATE);
-
-	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-		DSTATE_DOT_CLOCK_GATING;
-	I915_WRITE(D_STATE, dstate);
-}
-
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-}
-
-static void i830_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-		   DPLS_EDP_PPS_FIX_DIS);
-	/* Without this, mode sets may fail silently on FDI */
-	for_each_pipe(pipe)
-		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
-static void ironlake_teardown_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->renderctx) {
-		i915_gem_object_unpin(dev_priv->renderctx);
-		drm_gem_object_unreference(&dev_priv->renderctx->base);
-		dev_priv->renderctx = NULL;
-	}
-
-	if (dev_priv->pwrctx) {
-		i915_gem_object_unpin(dev_priv->pwrctx);
-		drm_gem_object_unreference(&dev_priv->pwrctx->base);
-		dev_priv->pwrctx = NULL;
-	}
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (I915_READ(PWRCTXA)) {
-		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-		(void)_intel_wait_for(dev,
-		    ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		    50, 1, "915pro");
-
-		I915_WRITE(PWRCTXA, 0);
-		POSTING_READ(PWRCTXA);
-
-		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-		POSTING_READ(RSTDBYCTL);
-	}
-
-	ironlake_teardown_rc6(dev);
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->renderctx == NULL)
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-	if (!dev_priv->renderctx)
-		return -ENOMEM;
-
-	if (dev_priv->pwrctx == NULL)
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-	if (!dev_priv->pwrctx) {
-		ironlake_teardown_rc6(dev);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-void ironlake_enable_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	/* rc6 disabled by default due to repeated reports of hanging during
-	 * boot and resume.
-	 */
-	if (!intel_enable_rc6(dev))
-		return;
-
-	DRM_LOCK(dev);
-	ret = ironlake_setup_rc6(dev);
-	if (ret) {
-		DRM_UNLOCK(dev);
-		return;
-	}
-
-	/*
-	 * GPU can automatically power down the render unit if given a page
-	 * to save state.
-	 */
-	ret = BEGIN_LP_RING(6);
-	if (ret) {
-		ironlake_teardown_rc6(dev);
-		DRM_UNLOCK(dev);
-		return;
-	}
-
-	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-	OUT_RING(MI_SET_CONTEXT);
-	OUT_RING(dev_priv->renderctx->gtt_offset |
-		 MI_MM_SPACE_GTT |
-		 MI_SAVE_EXT_STATE_EN |
-		 MI_RESTORE_EXT_STATE_EN |
-		 MI_RESTORE_INHIBIT);
-	OUT_RING(MI_SUSPEND_FLUSH);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_FLUSH);
-	ADVANCE_LP_RING();
-
-	/*
-	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
-	 * does an implicit flush, combined with MI_FLUSH above, it should be
-	 * safe to assume that renderctx is valid
-	 */
-	ret = intel_wait_ring_idle(LP_RING(dev_priv));
-	if (ret) {
-		DRM_ERROR("failed to enable ironlake power savings\n");
-		ironlake_teardown_rc6(dev);
-		DRM_UNLOCK(dev);
-		return;
-	}
-
-	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	DRM_UNLOCK(dev);
-}
-
-void intel_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	dev_priv->display.init_clock_gating(dev);
-
-	if (dev_priv->display.init_pch_clock_gating)
-		dev_priv->display.init_pch_clock_gating(dev);
-}
-
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -8969,33 +6751,21 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.dpms = ironlake_crtc_dpms;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else {
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_plane = i9xx_update_plane;
 	}
 
-	if (I915_HAS_FBC(dev)) {
-		if (HAS_PCH_SPLIT(dev)) {
-			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-			dev_priv->display.enable_fbc = ironlake_enable_fbc;
-			dev_priv->display.disable_fbc = ironlake_disable_fbc;
-		} else if (IS_GM45(dev)) {
-			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-			dev_priv->display.enable_fbc = g4x_enable_fbc;
-			dev_priv->display.disable_fbc = g4x_disable_fbc;
-		} else if (IS_CRESTLINE(dev)) {
-			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-			dev_priv->display.enable_fbc = i8xx_enable_fbc;
-			dev_priv->display.disable_fbc = i8xx_disable_fbc;
-		}
-		/* 855GM needs testing */
-	}
-
 	/* Returns the core display clock speed */
-	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+	if (IS_VALLEYVIEW(dev))
 		dev_priv->display.get_display_clock_speed =
+			valleyview_get_display_clock_speed;
+	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
@@ -9016,124 +6786,27 @@
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
 
-	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
-		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
-		/* IVB configs may use multi-threaded forcewake */
-		if (IS_IVYBRIDGE(dev)) {
-			u32	ecobus;
-
-			/* A small trick here - if the bios hasn't configured MT forcewake,
-			 * and if the device is in RC6, then force_wake_mt_get will not wake
-			 * the device and the ECOBUS read will return zero. Which will be
-			 * (correctly) interpreted by the test below as MT forcewake being
-			 * disabled.
-			 */
-			DRM_LOCK(dev);
-			__gen6_gt_force_wake_mt_get(dev_priv);
-			ecobus = I915_READ_NOTRACE(ECOBUS);
-			__gen6_gt_force_wake_mt_put(dev_priv);
-			DRM_UNLOCK(dev);
-
-			if (ecobus & FORCEWAKE_MT_ENABLE) {
-				DRM_DEBUG_KMS("Using MT version of forcewake\n");
-				dev_priv->display.force_wake_get =
-					__gen6_gt_force_wake_mt_get;
-				dev_priv->display.force_wake_put =
-					__gen6_gt_force_wake_mt_put;
-			}
-		}
-
-		if (HAS_PCH_IBX(dev))
-			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
-		else if (HAS_PCH_CPT(dev))
-			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
 		if (IS_GEN5(dev)) {
-			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
-				dev_priv->display.update_wm = ironlake_update_wm;
-			else {
-				DRM_DEBUG_KMS("Failed to get proper latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_GEN6(dev)) {
-			if (SNB_READ_WM0_LATENCY()) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-			if (SNB_READ_WM0_LATENCY()) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
+		} else if (IS_HASWELL(dev)) {
+			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+			dev_priv->display.write_eld = ironlake_write_eld;
 		} else
 			dev_priv->display.update_wm = NULL;
-	} else if (IS_PINEVIEW(dev)) {
-		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
-					    dev_priv->is_ddr3,
-					    dev_priv->fsb_freq,
-					    dev_priv->mem_freq)) {
-			DRM_INFO("failed to find known CxSR latency "
-				 "(found ddr%s fsb freq %d, mem freq %d), "
-				 "disabling CxSR\n",
-				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
-				 dev_priv->fsb_freq, dev_priv->mem_freq);
-			/* Disable CxSR and never update its watermark again */
-			pineview_disable_cxsr(dev);
-			dev_priv->display.update_wm = NULL;
-		} else
-			dev_priv->display.update_wm = pineview_update_wm;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.force_wake_get = vlv_force_wake_get;
+		dev_priv->display.force_wake_put = vlv_force_wake_put;
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-		dev_priv->display.update_wm = g4x_update_wm;
-		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
-	} else if (IS_GEN4(dev)) {
-		dev_priv->display.update_wm = i965_update_wm;
-		if (IS_CRESTLINE(dev))
-			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
-		else if (IS_BROADWATER(dev))
-			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
-	} else if (IS_GEN3(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-	} else if (IS_I865G(dev)) {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-		dev_priv->display.get_fifo_size = i830_get_fifo_size;
-	} else if (IS_I85X(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-	} else {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i830_init_clock_gating;
-		if (IS_845G(dev))
-			dev_priv->display.get_fifo_size = i845_get_fifo_size;
-		else
-			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
@@ -9172,7 +6845,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
-	DRM_DEBUG("applying pipe a force quirk\n");
+	DRM_INFO("applying pipe a force quirk\n");
 }
 
 /*
@@ -9182,8 +6855,20 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+	DRM_INFO("applying lvds SSC disable quirk\n");
 }
 
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+	DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -9193,7 +6878,7 @@
 
 #define	PCI_ANY_ID	(~0u)
 
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
 	/* HP Mini needs pipe A force quirk (LP: #322104) */
 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
@@ -9218,6 +6903,9 @@
 
 	/* Sony Vaio Y cannot use SSC on LVDS */
 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -9226,8 +6914,8 @@
 	device_t d;
 	int i;
 
-	d = dev->device;
-	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
+	d = dev->dev;
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
 		q = &intel_quirks[i];
 		if (pci_get_device(d) == q->device &&
 		    (pci_get_subvendor(d) == q->subsystem_vendor ||
@@ -9253,7 +6941,7 @@
 #if 0
 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
 #endif
-	outb(VGA_SR_INDEX, 1);
+	outb(VGA_SR_INDEX, SR01);
 	sr1 = inb(VGA_SR_DATA);
 	outb(VGA_SR_DATA, sr1 | 1 << 5);
 #if 0
@@ -9265,6 +6953,40 @@
 	POSTING_READ(vga_reg);
 }
 
+static void ivb_pch_pwm_override(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * IVB has CPU eDP backlight regs too, set things up to let the
+	 * PCH regs control the backlight
+	 */
+	I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
+	I915_WRITE(BLC_PWM_CPU_CTL, 0);
+	I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_init_clock_gating(dev);
+
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_enable_drps(dev);
+		ironlake_enable_rc6(dev);
+		intel_init_emon(dev);
+	}
+
+	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
+
+	if (IS_IVYBRIDGE(dev))
+		ivb_pch_pwm_override(dev);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9278,11 +7000,14 @@
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
-	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
-	    &intel_mode_funcs);
+	dev->mode_config.funcs = &intel_mode_funcs;
 
 	intel_init_quirks(dev);
 
+	intel_init_pm(dev);
+
+	intel_prepare_ddi(dev);
+
 	intel_init_display(dev);
 
 	if (IS_GEN2(dev)) {
@@ -9307,30 +7032,19 @@
 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
 	}
 
+	intel_pch_pll_init(dev);
+
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
 
-	intel_init_clock_gating(dev);
-
-	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
-		intel_init_emon(dev);
-	}
-
-	if (IS_GEN6(dev)) {
-		gen6_enable_rps(dev_priv);
-		gen6_update_ring_freq(dev_priv);
-	}
-
 	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
-	callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
+	callout_init(&dev_priv->idle_callout, 1);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
-	if (IS_IRONLAKE_M(dev))
-		ironlake_enable_rc6(dev);
+	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
 }
@@ -9361,16 +7075,20 @@
 
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
-	if (IS_GEN6(dev))
+	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
 		gen6_disable_rps(dev);
 
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_rc6(dev);
 
+	if (IS_VALLEYVIEW(dev))
+		vlv_init_dpio(dev);
+
+	DRM_UNLOCK(dev);
+
 	/* Disable the irq before mode object teardown, for the irq might
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
-	DRM_UNLOCK(dev);
 
 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
 		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);

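For reference, the quirk handling above reduces to a table scan keyed on
PCI IDs, with PCI_ANY_ID (~0u) acting as a wildcard for the subsystem
fields. A minimal sketch of the lookup under the struct layout shown in
the diff (apply_quirks is a hypothetical name, not part of the commit):

    #include <sys/param.h>
    #include <sys/bus.h>
    #include <dev/pci/pcivar.h>

    static void
    apply_quirks(struct drm_device *dev, device_t d,
        const struct intel_quirk *q, int nquirks)
    {
            int i;

            for (i = 0; i < nquirks; i++, q++) {
                    /* The device ID must match exactly; the subsystem
                     * IDs may be wildcarded with PCI_ANY_ID. */
                    if (pci_get_device(d) == q->device &&
                        (pci_get_subvendor(d) == q->subsystem_vendor ||
                         q->subsystem_vendor == PCI_ANY_ID) &&
                        (pci_get_subdevice(d) == q->subsystem_device ||
                         q->subsystem_device == PCI_ANY_ID))
                            q->hook(dev);
            }
    }
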
Modified: trunk/sys/dev/drm2/i915/intel_dp.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_dp.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_dp.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008 Intel Corporation
  *
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_dp.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_dp.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -43,9 +44,6 @@
 
 #define DP_LINK_CONFIGURATION_SIZE	9
 
-/* XXXKIB what is the right code for the FreeBSD ? */
-#define EREMOTEIO	ENXIO
-
 struct intel_dp {
 	struct intel_encoder base;
 	uint32_t output_reg;
@@ -224,7 +222,7 @@
 
 static bool
 intel_dp_adjust_dithering(struct intel_dp *intel_dp,
-			  struct drm_display_mode *mode,
+			  const struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
@@ -612,7 +610,7 @@
 				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
-			return (-ret);
+			return (ret);
 		}
 
 		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -623,7 +621,7 @@
 			break;
 		case AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
-			return (EREMOTEIO);
+			return (-EREMOTEIO);
 		case AUX_NATIVE_REPLY_DEFER:
 			DELAY(100);
 			continue;
@@ -630,7 +628,7 @@
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
 				  reply[0]);
-			return (EREMOTEIO);
+			return (-EREMOTEIO);
 		}
 
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -641,7 +639,7 @@
 			return (0/*reply_bytes - 1*/);
 		case AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
-			return (EREMOTEIO);
+			return (-EREMOTEIO);
 		case AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			DELAY(100);
@@ -648,12 +646,12 @@
 			break;
 		default:
 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
-			return (EREMOTEIO);
+			return (-EREMOTEIO);
 		}
 	}
 
 	DRM_ERROR("too many retries, giving up\n");
-	return (EREMOTEIO);
+	return (-EREMOTEIO);
 }
 
 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -668,7 +666,7 @@
 	DRM_DEBUG_KMS("i2c_init %s\n", name);
 
 	ironlake_edp_panel_vdd_on(intel_dp);
-	ret = iic_dp_aux_add_bus(intel_connector->base.dev->device, name,
+	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
 	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
 	    &intel_dp->adapter);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -676,7 +674,7 @@
 }
 
 static bool
-intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -684,7 +682,7 @@
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-	int bpp;
+	int bpp, mode_rate;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -691,31 +689,32 @@
 		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
 		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
 					mode, adjusted_mode);
-		/*
-		 * the mode->clock is used to calculate the Data&Link M/N
-		 * of the pipe. For the eDP the fixed clock should be used.
-		 */
-		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
-	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+	DRM_DEBUG_KMS("DP link computation with max lane count %i "
+		      "max bw %02x pixel clock %iKHz\n",
+		      max_lane_count, bws[max_clock], mode->clock);
+
+	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode))
 		return false;
 
 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-			if (intel_dp_link_required(mode->clock, bpp)
-					<= link_avail) {
+			if (mode_rate <= link_avail) {
 				intel_dp->link_bw = bws[clock];
 				intel_dp->lane_count = lane_count;
 				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-				DRM_DEBUG_KMS("Display port link bw %02x lane "
-						"count %d clock %d\n",
+				DRM_DEBUG_KMS("DP link bw %02x lane "
+						"count %d clock %d bpp %d\n",
 				       intel_dp->link_bw, intel_dp->lane_count,
-				       adjusted_mode->clock);
+				       adjusted_mode->clock, bpp);
+				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+					      mode_rate, link_avail);
 				return true;
 			}
 		}
@@ -1148,6 +1147,7 @@
 
 	if (intel_dp->want_panel_vdd)
 		printf("Cannot turn power off while VDD is on\n");
+	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
 
 	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@@ -1945,6 +1945,23 @@
 	return false;
 }
 
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+	u8 buf[3];
+
+	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+		DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+		DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
+			      buf[0], buf[1], buf[2]);
+}
+
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
@@ -2122,6 +2139,8 @@
 	if (status != connector_status_connected)
 		return status;
 
+	intel_dp_probe_oui(intel_dp);
+
 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
 	} else {
@@ -2128,7 +2147,6 @@
 		edid = intel_dp_get_edid(connector, intel_dp->adapter);
 		if (edid) {
 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
-			connector->display_info.raw_edid = NULL;
 			free(edid, DRM_MEM_KMS);
 		}
 	}
@@ -2194,7 +2212,6 @@
 	if (edid) {
 		has_audio = drm_detect_monitor_audio(edid);
 
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	}
 
@@ -2210,7 +2227,7 @@
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -2284,7 +2301,7 @@
 			device_delete_child(intel_dp->dp_iic_bus,
 			    intel_dp->adapter);
 		}
-		device_delete_child(dev->device, intel_dp->dp_iic_bus);
+		device_delete_child(dev->dev, intel_dp->dp_iic_bus);
 	}
 	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
@@ -2436,6 +2453,7 @@
 	}
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
@@ -2483,6 +2501,13 @@
 		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
 		pp_div = I915_READ(PCH_PP_DIVISOR);
 
+		if (!pp_on || !pp_off || !pp_div) {
+			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+			intel_dp_encoder_destroy(&intel_dp->base.base);
+			intel_dp_destroy(&intel_connector->base);
+			return;
+		}
+
 		/* Pull timing values out of registers */
 		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
 			PANEL_POWER_UP_DELAY_SHIFT;

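The reworked mode_fixup above hoists the required bandwidth out of the
loop as mode_rate and then searches lane count and link rate
cheapest-first. A hedged sketch of that search; link_available() is a
hypothetical stand-in for intel_dp_max_data_rate(), and
pick_dp_link_config is not a function in this commit:

    static bool
    pick_dp_link_config(struct intel_dp *intel_dp, int mode_rate,
        int max_lane_count, int max_clock)
    {
            static const int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
            int lane_count, clock;

            for (lane_count = 1; lane_count <= max_lane_count;
                lane_count <<= 1) {
                    for (clock = 0; clock <= max_clock; clock++) {
                            /* The first (cheapest) configuration that
                             * can carry the mode wins. */
                            if (mode_rate <=
                                link_available(bws[clock], lane_count)) {
                                    intel_dp->link_bw = bws[clock];
                                    intel_dp->lane_count = lane_count;
                                    return (true);
                            }
                    }
            }
            return (false); /* no lane/rate combination fits */
    }
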
Modified: trunk/sys/dev/drm2/i915/intel_drv.h
===================================================================
--- trunk/sys/dev/drm2/i915/intel_drv.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_drv.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
  * Copyright (c) 2007-2008 Intel Corporation
@@ -22,7 +23,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
- * $FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_drv.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/i915/intel_drv.h 280369 2015-03-23 13:38:33Z kib $
  */
 
 #ifndef DRM_INTEL_DRV_H
@@ -55,6 +56,21 @@
 	ret;								\
 })
 
+#define wait_for_atomic_us(COND, US) ({ \
+	int i, ret__ = -ETIMEDOUT;	\
+	for (i = 0; i < (US); i++) {	\
+		if ((COND)) {		\
+			ret__ = 0;	\
+			break;		\
+		}			\
+		DELAY(1);		\
+	}				\
+	ret__;				\
+})
+
+#define	wait_for(COND, MS) _intel_wait_for(NULL, COND, MS, 1, "915wfi")
+#define	wait_for_atomic(COND, MS) _intel_wait_for(NULL, COND, MS, 0, "915wfa")
+
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
 
@@ -174,8 +190,8 @@
 	bool cursor_visible;
 	unsigned int bpp;
 
-	bool no_pll; /* tertiary pipe for IVB */
-	bool use_pll_a;
+	/* We can share PLLs across outputs if the timings match */
+	struct intel_pch_pll *pch_pll;
 };
 
 struct intel_plane {
@@ -199,6 +215,25 @@
 			     struct drm_intel_sprite_colorkey *key);
 };
 
+struct intel_watermark_params {
+	unsigned long fifo_size;
+	unsigned long max_wm;
+	unsigned long default_wm;
+	unsigned long guard_size;
+	unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+	int is_desktop;
+	int is_ddr3;
+	unsigned long fsb_freq;
+	unsigned long mem_freq;
+	unsigned long display_sr;
+	unsigned long display_hpll_disable;
+	unsigned long cursor_sr;
+	unsigned long cursor_hpll_disable;
+};
+
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -210,6 +245,8 @@
 #define DIP_TYPE_AVI    0x82
 #define DIP_VERSION_AVI 0x2
 #define DIP_LEN_AVI     13
+#define DIP_AVI_PR_1    0
+#define DIP_AVI_PR_2    1
 
 #define DIP_TYPE_SPD	0x83
 #define DIP_VERSION_SPD	0x1
@@ -243,23 +280,36 @@
 			uint8_t ITC_EC_Q_SC;
 			/* PB4 - VIC 6:0 */
 			uint8_t VIC;
-			/* PB5 - PR 3:0 */
-			uint8_t PR;
+			/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+			uint8_t YQ_CN_PR;
 			/* PB6 to PB13 */
 			uint16_t top_bar_end;
 			uint16_t bottom_bar_start;
 			uint16_t left_bar_end;
 			uint16_t right_bar_start;
-		} avi;
+		} __attribute__ ((packed)) avi;
 		struct {
 			uint8_t vn[8];
 			uint8_t pd[16];
 			uint8_t sdi;
-		} spd;
+		} __attribute__ ((packed)) spd;
 		uint8_t payload[27];
 	} __attribute__ ((packed)) body;
 } __attribute__((packed));
 
+struct intel_hdmi {
+	struct intel_encoder base;
+	u32 sdvox_reg;
+	int ddc_bus;
+	int ddi_port;
+	uint32_t color_range;
+	bool has_hdmi_sink;
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	void (*write_infoframe)(struct drm_encoder *encoder,
+				struct dip_infoframe *frame);
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -299,8 +349,13 @@
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+			    struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+			    bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
 extern void intel_mark_busy(struct drm_device *dev,
@@ -314,13 +369,17 @@
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+				      enum plane plane);
 
+void intel_sanitize_pm(struct drm_device *dev);
+
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    int fitting_mode,
-				    struct drm_display_mode *mode,
+				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
@@ -371,6 +430,7 @@
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
@@ -377,7 +437,13 @@
 extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 extern void gen6_disable_rps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
+extern int intel_enable_rc6(const struct drm_device *dev);
 
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode);
+
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_i915_gem_object *obj,
 				      struct intel_ring_buffer *pipelined);
@@ -414,15 +480,30 @@
 extern void intel_write_eld(struct drm_encoder *encoder,
 			    struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
 /* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
 extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 					   uint32_t sprite_width,
 					   int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+			 struct drm_display_mode *mode);
+
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 
-#endif
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+
+#endif /* __INTEL_DRV_H__ */

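The new wait_for_atomic_us() macro busy-polls with DELAY(1) instead of
sleeping, so it is safe in contexts that may not block. A hedged usage
sketch; the register and bit names below are illustrative, not part of
this header:

    /* Spin for at most 100 microseconds waiting for a ready bit;
     * evaluates to 0 on success and -ETIMEDOUT on timeout. */
    if (wait_for_atomic_us(I915_READ(STATUS_REG) & READY_BIT, 100))
            DRM_ERROR("hardware not ready after 100 us\n");
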
Modified: trunk/sys/dev/drm2/i915/intel_fb.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_fb.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_fb.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2007 David Airlie
  *
@@ -25,8 +26,9 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_fb.c 252497 2013-07-02 04:45:51Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_fb.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
+#include "opt_syscons.h"
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
 #include <dev/drm2/drm_crtc.h>
@@ -41,8 +43,8 @@
 	struct drm_device *dev = ifbdev->helper.dev;
 #if 0
 	struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
 	struct fb_info *info;
-#endif
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_i915_gem_object *obj;
@@ -72,20 +74,27 @@
 	DRM_LOCK(dev);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
 	}
 
-#if 0
-	info = framebuffer_alloc(0, device);
+	info = framebuffer_alloc();
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unpin;
 	}
 
+#if 0
 	info->par = ifbdev;
+#else
+	info->fb_size = size;
+	info->fb_bpp = sizes->surface_bpp;
+	info->fb_pbase = dev->agp->base + obj->gtt_offset;
+	info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
+	    PAT_WRITE_COMBINING);
+
 #endif
 
 	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
@@ -95,8 +104,8 @@
 	fb = &ifbdev->ifb.base;
 
 	ifbdev->helper.fb = fb;
+	ifbdev->helper.fbdev = info;
 #if 0
-	ifbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "inteldrmfb");
 
@@ -129,15 +138,14 @@
 	info->screen_size = size;
 
 //	memset(info->screen_base, 0, size);
+#endif
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-#endif
-
-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
-		      fb->width, fb->height,
+	DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n",
+		      fb->width, fb->height, fb->depth,
 		      obj->gtt_offset, obj);
 
 	DRM_UNLOCK(dev);
@@ -182,21 +190,19 @@
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-#if 0
 	struct fb_info *info;
-#endif
 	struct intel_framebuffer *ifb = &ifbdev->ifb;
 
-#if 0
 	if (ifbdev->helper.fbdev) {
 		info = ifbdev->helper.fbdev;
+#if 0
 		unregister_framebuffer(info);
 		iounmap(info->screen_base);
 		if (info->cmap.len)
 			fb_dealloc_cmap(&info->cmap);
+#endif
 		framebuffer_release(info);
 	}
-#endif
 
 	drm_fb_helper_fini(&ifbdev->helper);
 
@@ -207,7 +213,9 @@
 	}
 }
 
+#ifdef DEV_SC
 extern int sc_txtmouse_no_retrace_wait;
+#endif
 
 int intel_fbdev_init(struct drm_device *dev)
 {
@@ -231,7 +239,9 @@
 
 	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 	drm_fb_helper_initial_config(&ifbdev->helper, 32);
+#ifdef DEV_SC
 	sc_txtmouse_no_retrace_wait = 1;
+#endif
 	return 0;
 }
 

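The syscons hook is now compiled conditionally: opt_syscons.h defines
DEV_SC only when the kernel configuration includes syscons, so kernels
built without it no longer reference sc_txtmouse_no_retrace_wait. A
minimal sketch of the pattern (the helper name is hypothetical):

    #include "opt_syscons.h"

    #ifdef DEV_SC
    extern int sc_txtmouse_no_retrace_wait;
    #endif

    static void
    fbdev_quiet_syscons(void)
    {
    #ifdef DEV_SC
            /* Keep syscons from waiting on vertical retrace for its
             * text-mode mouse cursor once KMS owns the display. */
            sc_txtmouse_no_retrace_wait = 1;
    #endif
    }
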
Modified: trunk/sys/dev/drm2/i915/intel_hdmi.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_hdmi.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_hdmi.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright 2006 Dave Airlie <airlied at linux.ie>
  * Copyright © 2006-2009 Intel Corporation
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_hdmi.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_hdmi.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -37,19 +38,7 @@
 #include <dev/drm2/i915/i915_drv.h>
 #include <dev/drm2/i915/intel_drv.h>
 
-struct intel_hdmi {
-	struct intel_encoder base;
-	u32 sdvox_reg;
-	int ddc_bus;
-	uint32_t color_range;
-	bool has_hdmi_sink;
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	void (*write_infoframe)(struct drm_encoder *encoder,
-				struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
 	return container_of(encoder, struct intel_hdmi, base.base);
 }
@@ -75,68 +64,83 @@
 	frame->checksum = 0x100 - sum;
 }
 
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
 {
-	u32 flags = 0;
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_SELECT_AVI;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_SELECT_SPD;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
 
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+{
 	switch (frame->type) {
 	case DIP_TYPE_AVI:
-		flags |= VIDEO_DIP_SELECT_AVI;
-		break;
+		return VIDEO_DIP_ENABLE_AVI;
 	case DIP_TYPE_SPD:
-		flags |= VIDEO_DIP_SELECT_SPD;
-		break;
+		return VIDEO_DIP_ENABLE_SPD;
 	default:
-		DRM_DEBUG("unknown info frame type %d\n", frame->type);
-		break;
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
 	}
+}
 
-	return flags;
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI_HSW;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD_HSW;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
 }
 
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
 {
-	u32 flags = 0;
-
 	switch (frame->type) {
 	case DIP_TYPE_AVI:
-		flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
-		break;
+		return HSW_TVIDEO_DIP_AVI_DATA(pipe);
 	case DIP_TYPE_SPD:
-		flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
-		break;
+		return HSW_TVIDEO_DIP_SPD_DATA(pipe);
 	default:
-		DRM_DEBUG("unknown info frame type %d\n", frame->type);
-		break;
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
 	}
-
-	return flags;
 }
 
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
-				 struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+	u32 val = I915_READ(VIDEO_DIP_CTL);
 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
 
-
-	/* XXX first guess at handling video port, is this corrent? */
+	val &= ~VIDEO_DIP_PORT_MASK;
 	if (intel_hdmi->sdvox_reg == SDVOB)
-		port = VIDEO_DIP_PORT_B;
+		val |= VIDEO_DIP_PORT_B;
 	else if (intel_hdmi->sdvox_reg == SDVOC)
-		port = VIDEO_DIP_PORT_C;
+		val |= VIDEO_DIP_PORT_C;
 	else
 		return;
 
-	flags = intel_infoframe_index(frame);
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
 
-	val &= ~VIDEO_DIP_SELECT_MASK;
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
 
-	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, val);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(VIDEO_DIP_DATA, *data);
@@ -143,41 +147,163 @@
 		data++;
 	}
 
-	flags |= intel_infoframe_flags(frame);
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, val);
 }
 
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
-				     struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
-	u32 flags, val = I915_READ(reg);
+	u32 val = I915_READ(reg);
 
+	val &= ~VIDEO_DIP_PORT_MASK;
+	switch (intel_hdmi->sdvox_reg) {
+	case HDMIB:
+		val |= VIDEO_DIP_PORT_B;
+		break;
+	case HDMIC:
+		val |= VIDEO_DIP_PORT_C;
+		break;
+	case HDMID:
+		val |= VIDEO_DIP_PORT_D;
+		break;
+	default:
+		return;
+	}
+
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	flags = intel_infoframe_index(frame);
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
 
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	/* The DIP control register spec says that we need to update the AVI
+	 * infoframe without clearing its enable bit. */
+	if (frame->type == DIP_TYPE_AVI)
+		val |= VIDEO_DIP_ENABLE_AVI;
+	else
+		val &= ~g4x_infoframe_enable(frame);
 
+	val |= VIDEO_DIP_ENABLE;
+
+	I915_WRITE(reg, val);
+
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
 
-	flags |= intel_infoframe_flags(frame);
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	I915_WRITE(reg, val);
 }
 
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+				     struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+	unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(ctl_reg);
+
+	if (data_reg == 0)
+		return;
+
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	val &= ~hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
+
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(data_reg + i, *data);
+		data++;
+	}
+
+	val |= hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
+}
+
 static void intel_set_infoframe(struct drm_encoder *encoder,
 				struct dip_infoframe *frame)
 {
@@ -190,7 +316,8 @@
 	intel_hdmi->write_infoframe(encoder, frame);
 }
 
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+					 struct drm_display_mode *adjusted_mode)
 {
 	struct dip_infoframe avi_if = {
 		.type = DIP_TYPE_AVI,
@@ -198,10 +325,13 @@
 		.len = DIP_LEN_AVI,
 	};
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
 	intel_set_infoframe(encoder, &avi_if);
 }
 
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
 {
 	struct dip_infoframe spd_if;
 
@@ -222,8 +352,7 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 sdvox;
 
@@ -260,7 +389,7 @@
 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
 	POSTING_READ(intel_hdmi->sdvox_reg);
 
-	intel_hdmi_set_avi_infoframe(encoder);
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
 }
 
@@ -318,7 +447,7 @@
 }
 
 static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;
@@ -334,7 +463,8 @@
 
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_audio = false;
-	edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
+	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv,
+	    intel_hdmi->ddc_bus));
 
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -344,7 +474,6 @@
 						drm_detect_hdmi_monitor(edid);
 			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
 		}
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	} else {
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] got no edid, ddc port %d\n",
@@ -371,7 +500,8 @@
 	 */
 
 	return intel_ddc_get_modes(connector,
-	    dev_priv->gmbus[intel_hdmi->ddc_bus]);
+				   intel_gmbus_get_adapter(dev_priv,
+							   intel_hdmi->ddc_bus));
 }
 
 static bool
@@ -382,12 +512,13 @@
 	struct edid *edid;
 	bool has_audio = false;
 
-	edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+						    intel_hdmi->ddc_bus));
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL)
 			has_audio = drm_detect_monitor_audio(edid);
 
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	}
 
@@ -403,7 +534,7 @@
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -458,6 +589,14 @@
 	free(connector, DRM_MEM_KMS);
 }
 
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+	.dpms = intel_ddi_dpms,
+	.mode_fixup = intel_hdmi_mode_fixup,
+	.prepare = intel_encoder_prepare,
+	.mode_set = intel_ddi_mode_set,
+	.commit = intel_encoder_commit,
+};
+
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
 	.dpms = intel_hdmi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
@@ -542,21 +681,59 @@
 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+		intel_hdmi->ddi_port = PORT_B;
+		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+		intel_hdmi->ddi_port = PORT_C;
+		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+		intel_hdmi->ddi_port = PORT_D;
+		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+	} else {
+		/* If we got an unknown sdvox_reg, things are pretty much broken
+		 * in a way that we should let the kernel know about it */
+		DRM_DEBUG_KMS("unknown sdvox_reg %d\n", sdvox_reg);
 	}
 
-
 	intel_hdmi->sdvox_reg = sdvox_reg;
-
 	if (!HAS_PCH_SPLIT(dev)) {
-		intel_hdmi->write_infoframe = i9xx_write_infoframe;
+		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		I915_WRITE(VIDEO_DIP_CTL, 0);
+	} else if (IS_VALLEYVIEW(dev)) {
+		intel_hdmi->write_infoframe = vlv_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+	} else if (IS_HASWELL(dev)) {
+		/* FIXME: Haswell has a new set of DIP frame registers, but we are
+		 * just doing the minimal required for HDMI to work at this stage.
+		 */
+		intel_hdmi->write_infoframe = hsw_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+	} else if (HAS_PCH_IBX(dev)) {
+		intel_hdmi->write_infoframe = ibx_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(TVIDEO_DIP_CTL(i), 0);
 	} else {
-		intel_hdmi->write_infoframe = ironlake_write_infoframe;
+		intel_hdmi->write_infoframe = cpt_write_infoframe;
 		for_each_pipe(i)
 			I915_WRITE(TVIDEO_DIP_CTL(i), 0);
 	}
 
-	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+	if (IS_HASWELL(dev))
+		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+	else
+		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
 

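All of the per-platform infoframe writers added above (g4x, ibx, cpt,
vlv, hsw) share one disable-write-enable shape against their VIDEO_DIP
control and data registers. A condensed sketch of that shared sequence;
dip_write_sequence is a hypothetical helper and ctl/data stand in for
the platform-specific register offsets:

    static void
    dip_write_sequence(struct drm_device *dev, u32 ctl, u32 data,
        struct dip_infoframe *frame)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            uint32_t *payload = (uint32_t *)frame;
            unsigned i, len = DIP_HEADER_SIZE + frame->len;
            u32 val = I915_READ(ctl);

            val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* reset data offset */
            val |= g4x_infoframe_index(frame);
            val &= ~g4x_infoframe_enable(frame);   /* off while updating */
            val |= VIDEO_DIP_ENABLE;
            I915_WRITE(ctl, val);

            /* Push the header and payload one dword at a time; the
             * hardware advances the DIP data offset on each write. */
            for (i = 0; i < len; i += 4) {
                    I915_WRITE(data, *payload);
                    payload++;
            }

            val |= g4x_infoframe_enable(frame); /* re-enable the frame */
            val &= ~VIDEO_DIP_FREQ_MASK;
            val |= VIDEO_DIP_FREQ_VSYNC;        /* resend every vblank */
            I915_WRITE(ctl, val);
    }
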
Modified: trunk/sys/dev/drm2/i915/intel_iic.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_iic.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_iic.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
  * Copyright © 2006-2008,2010 Intel Corporation
@@ -54,7 +55,7 @@
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_iic.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_iic.c 294292 2016-01-18 20:44:29Z jhb $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -67,9 +68,22 @@
 #include "iicbus_if.h"
 #include "iicbb_if.h"
 
-static int intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs);
 static void intel_teardown_gmbus_m(struct drm_device *dev, int m);
 
+struct gmbus_port {
+	const char *name;
+	int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+	{ "ssc", GPIOB },
+	{ "vga", GPIOA },
+	{ "panel", GPIOC },
+	{ "dpc", GPIOD },
+	{ "dpb", GPIOE },
+	{ "dpd", GPIOF },
+};
+
 /* Intel GPIO access functions */
 
 #define I2C_RISEFALL_TIME 10
@@ -128,10 +142,7 @@
 	struct drm_i915_private *dev_priv;
 
 	dev_priv = dev->dev_private;
-	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(PCH_GMBUS0, 0);
-	else
-		I915_WRITE(GMBUS0, 0);
+	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 }
 
 static int
@@ -225,15 +236,134 @@
 }
 
 static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg,
+    u32 gmbus1_index)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u16 len = msg->len;
+	u8 *buf = msg->buf;
+
+	I915_WRITE(GMBUS1 + reg_offset,
+		   gmbus1_index |
+		   GMBUS_CYCLE_WAIT |
+		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+		   (msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
+		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+		u32 val, loop = 0;
+		u32 gmbus2;
+
+		ret = _intel_wait_for(sc->drm_dev,
+		    ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+		    (GMBUS_SATOER | GMBUS_HW_RDY)),
+		    50, 1, "915gbr");
+		if (ret)
+			return (-ETIMEDOUT);
+		if (gmbus2 & GMBUS_SATOER)
+			return (-ENXIO);
+
+		val = I915_READ(GMBUS3 + reg_offset);
+		do {
+			*buf++ = val & 0xff;
+			val >>= 8;
+		} while (--len != 0 && ++loop < 4);
+	}
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct iic_msg *msg)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u16 len = msg->len;
+	u8 *buf = msg->buf;
+	u32 val, loop;
+
+	val = loop = 0;
+	while (len && loop < 4) {
+		val |= *buf++ << (8 * loop++);
+		len -= 1;
+	}
+
+	I915_WRITE(GMBUS3 + reg_offset, val);
+	I915_WRITE(GMBUS1 + reg_offset,
+		   GMBUS_CYCLE_WAIT |
+		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+		   (msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
+		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+		u32 gmbus2;
+
+		val = loop = 0;
+		do {
+			val |= *buf++ << (8 * loop);
+		} while (--len != 0 && ++loop < 4);
+
+		I915_WRITE(GMBUS3 + reg_offset, val);
+
+		ret = _intel_wait_for(sc->drm_dev,
+		    ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+		    (GMBUS_SATOER | GMBUS_HW_RDY)),
+		    50, 1, "915gbw");
+		if (ret)
+			return (-ETIMEDOUT);
+		if (gmbus2 & GMBUS_SATOER)
+			return (-ENXIO);
+	}
+	return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct iic_msg *msgs, int i, int num)
+{
+	return (i + 1 < num &&
+		!(msgs[i].flags & IIC_M_RD) && msgs[i].len <= 2 &&
+		(msgs[i + 1].flags & IIC_M_RD));
+}
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct iic_msg *msgs)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u32 gmbus1_index = 0;
+	u32 gmbus5 = 0;
+	int ret;
+
+	if (msgs[0].len == 2)
+		gmbus5 = GMBUS_2BYTE_INDEX_EN |
+			 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+	if (msgs[0].len == 1)
+		gmbus1_index = GMBUS_CYCLE_INDEX |
+			       (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+	/* GMBUS5 holds 16-bit index */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, gmbus5);
+
+	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+	/* Clear GMBUS5 after each index transfer */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, 0);
+
+	return ret;
+}
+
+static int
 intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
 {
 	struct intel_iic_softc *sc;
 	struct drm_i915_private *dev_priv;
-	u8 *buf;
-	int error, i, reg_offset, unit;
-	u32 val, loop;
-	u16 len;
+	int error, i, ret, reg_offset, unit;
 
+	error = 0;
 	sc = device_get_softc(idev);
 	dev_priv = sc->drm_dev->dev_private;
 	unit = device_get_unit(idev);
@@ -240,87 +370,47 @@
 
 	sx_xlock(&dev_priv->gmbus_sx);
 	if (sc->force_bit_dev) {
-		error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
+		error = -IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs);
 		goto out;
 	}
 
-	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+	reg_offset = dev_priv->gpio_mmio_base;
 
 	I915_WRITE(GMBUS0 + reg_offset, sc->reg0);
 
 	for (i = 0; i < nmsgs; i++) {
-		len = msgs[i].len;
-		buf = msgs[i].buf;
+		u32 gmbus2;
 
-		if ((msgs[i].flags & IIC_M_RD) != 0) {
-			I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
-			    (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
-			    (len << GMBUS_BYTE_COUNT_SHIFT) |
-			    (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
-			    GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-			POSTING_READ(GMBUS2 + reg_offset);
-			do {
-				loop = 0;
-
-				if (_intel_wait_for(sc->drm_dev,
-				    (I915_READ(GMBUS2 + reg_offset) &
-					(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
-				    50, 1, "915gbr"))
-					goto timeout;
-				if ((I915_READ(GMBUS2 + reg_offset) &
-				    GMBUS_SATOER) != 0)
-					goto clear_err;
-
-				val = I915_READ(GMBUS3 + reg_offset);
-				do {
-					*buf++ = val & 0xff;
-					val >>= 8;
-				} while (--len != 0 && ++loop < 4);
-			} while (len != 0);
+		if (gmbus_is_index_read(msgs, i, nmsgs)) {
+			error = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+			i += 1;  /* set i to the index of the read xfer */
+		} else if (msgs[i].flags & IIC_M_RD) {
+			error = gmbus_xfer_read(dev_priv, &msgs[i], 0);
 		} else {
-			val = loop = 0;
-			do {
-				val |= *buf++ << (8 * loop);
-			} while (--len != 0 && ++loop < 4);
-
-			I915_WRITE(GMBUS3 + reg_offset, val);
-			I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
-			    (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
-			    (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
-			    (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
-			    GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-			POSTING_READ(GMBUS2+reg_offset);
-
-			while (len != 0) {
-				if (_intel_wait_for(sc->drm_dev,
-				    (I915_READ(GMBUS2 + reg_offset) &
-					(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
-				    50, 1, "915gbw"))
-					goto timeout;
-				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-					goto clear_err;
-
-				val = loop = 0;
-				do {
-					val |= *buf++ << (8 * loop);
-				} while (--len != 0 && ++loop < 4);
-
-				I915_WRITE(GMBUS3 + reg_offset, val);
-				POSTING_READ(GMBUS2 + reg_offset);
-			}
+			error = gmbus_xfer_write(dev_priv, &msgs[i]);
 		}
 
-		if (i + 1 < nmsgs && _intel_wait_for(sc->drm_dev,
-		    (I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER |
-			GMBUS_HW_WAIT_PHASE)) != 0,
-		    50, 1, "915gbh"))
+		if (error == -ETIMEDOUT)
 			goto timeout;
-		if ((I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) != 0)
+		if (error == -ENXIO)
 			goto clear_err;
+
+		ret = _intel_wait_for(sc->drm_dev,
+		    ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+		    (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE)),
+		    50, 1, "915gbh");
+		if (ret)
+			goto timeout;
+		if (gmbus2 & GMBUS_SATOER)
+			goto clear_err;
 	}
 
-	error = 0;
-done:
+	/* Generate a STOP condition on the bus. Note that gmbus can't generate
+	 * a STOP on the very first cycle. To simplify the code we
+	 * unconditionally generate the STOP condition with an additional gmbus
+	 * cycle. */
+	I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
 	/* Mark the GMBUS interface as disabled after waiting for idle.
 	 * We will re-enable it at the start of the next xfer,
 	 * till then let it sleep.
@@ -327,14 +417,25 @@
  	 */
 	if (_intel_wait_for(dev,
 	    (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-	    10, 1, "915gbu"))
-		DRM_INFO("GMBUS timed out waiting for idle\n");
+	    10, 1, "915gbu")) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+		    sc->name);
+		error = -ETIMEDOUT;
+	}
 	I915_WRITE(GMBUS0 + reg_offset, 0);
-out:
-	sx_xunlock(&dev_priv->gmbus_sx);
-	return (error);
+	goto out;
 
 clear_err:
+	/*
+	 * Wait for bus to IDLE before clearing NAK.
+	 * If we clear the NAK while bus is still active, then it will stay
+	 * active and the next transaction may fail.
+	 */
+	if (_intel_wait_for(dev,
+	    (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+	    10, 1, "915gbu"))
+		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", sc->name);
+
 	/* Toggle the Software Clear Interrupt bit. This has the effect
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
@@ -341,12 +442,25 @@
 	 */
 	I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
 	I915_WRITE(GMBUS1 + reg_offset, 0);
-	error = EIO;
-	goto done;
+	I915_WRITE(GMBUS0 + reg_offset, 0);
 
+	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+			 sc->name, msgs[i].slave,
+			 (msgs[i].flags & IIC_M_RD) ? 'r' : 'w', msgs[i].len);
+
+	/*
+	 * If no ACK is received during the address phase of a transaction,
+	 * the adapter must report -ENXIO.
+	 * It is not clear what to return if no ACK is received at other times.
+	 * So, we always return -ENXIO in all NAK cases, to ensure we send
+	 * it at least during the one case that is specified.
+	 */
+	error = -ENXIO;
+	goto out;
+
 timeout:
-	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
-	    sc->reg0 & 0xff, sc->name);
+	DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+	    sc->name, sc->reg0 & 0xff);
 	I915_WRITE(GMBUS0 + reg_offset, 0);
 
 	/*
@@ -354,11 +468,24 @@
 	 * Try GPIO bitbanging instead.
 	 */
 	sc->force_bit_dev = true;
+	error = -IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs);
 
-	error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
-	goto out;
+out:
+	sx_xunlock(&dev_priv->gmbus_sx);
+	return (-error);
 }
 
+device_t
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+    unsigned port)
+{
+
+	if (!intel_gmbus_is_port_valid(port))
+		DRM_ERROR("GMBUS get adapter %d: invalid port\n", port);
+	return (intel_gmbus_is_port_valid(port) ? dev_priv->gmbus[port - 1] :
+	    NULL);
+}
+
 void
 intel_gmbus_set_speed(device_t idev, int speed)
 {
@@ -379,53 +506,36 @@
 }
 
 static int
-intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs)
+intel_iicbb_pre_xfer(device_t idev)
 {
-	device_t bridge_dev;
 	struct intel_iic_softc *sc;
 	struct drm_i915_private *dev_priv;
-	int ret;
-	int i;
 
-	bridge_dev = device_get_parent(device_get_parent(idev));
-	sc = device_get_softc(bridge_dev);
+	sc = device_get_softc(idev);
 	dev_priv = sc->drm_dev->dev_private;
 
 	intel_iic_reset(sc->drm_dev);
 	intel_iic_quirk_set(dev_priv, true);
-	IICBB_SETSDA(bridge_dev, 1);
-	IICBB_SETSCL(bridge_dev, 1);
+	IICBB_SETSDA(idev, 1);
+	IICBB_SETSCL(idev, 1);
 	DELAY(I2C_RISEFALL_TIME);
+	return (0);
+}
 
-	/* convert slave addresses to format expected by iicbb */
-	for (i = 0; i < nmsgs; i++) {
-		msgs[i].slave <<= 1;
-		/* force use of repeated start instead of default stop+start */
-		if (i != (nmsgs - 1))
-			 msgs[i].flags |= IIC_M_NOSTOP;
-	}
-	ret = iicbus_transfer(idev, msgs, nmsgs);
-	/* restore the addresses */
-	for (i = 0; i < nmsgs; i++)
-		msgs[i].slave >>= 1;
-	IICBB_SETSDA(bridge_dev, 1);
-	IICBB_SETSCL(bridge_dev, 1);
+static void
+intel_iicbb_post_xfer(device_t idev)
+{
+	struct intel_iic_softc *sc;
+	struct drm_i915_private *dev_priv;
+
+	sc = device_get_softc(idev);
+	dev_priv = sc->drm_dev->dev_private;
+
+	IICBB_SETSDA(idev, 1);
+	IICBB_SETSCL(idev, 1);
 	intel_iic_quirk_set(dev_priv, false);
-
-	return (ret);
 }
 
-static const char *gpio_names[GMBUS_NUM_PORTS] = {
-	"disabled",
-	"ssc",
-	"vga",
-	"panel",
-	"dpc",
-	"dpb",
-	"reserved",
-	"dpd",
-};
-
 static int
 intel_gmbus_probe(device_t dev)
 {
@@ -438,23 +548,30 @@
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_iic_softc *sc;
-	int pin;
+	int pin, port;
 
 	sc = device_get_softc(idev);
 	sc->drm_dev = device_get_softc(device_get_parent(idev));
 	dev_priv = sc->drm_dev->dev_private;
 	pin = device_get_unit(idev);
+	port = pin + 1;
 
-	snprintf(sc->name, sizeof(sc->name), "gmbus bus %s", gpio_names[pin]);
+	snprintf(sc->name, sizeof(sc->name), "gmbus %s",
+	    intel_gmbus_is_port_valid(port) ? gmbus_ports[pin].name :
+	    "reserved");
 	device_set_desc(idev, sc->name);
 
 	/* By default use a conservative clock rate */
-	sc->reg0 = pin | GMBUS_RATE_100KHZ;
+	sc->reg0 = port | GMBUS_RATE_100KHZ;
 
-	/* XXX force bit banging until GMBUS is fully debugged */
+	/* gmbus seems to be broken on i830 */
+	if (IS_I830(sc->drm_dev))
+		sc->force_bit_dev = true;
+#if 0
 	if (IS_GEN2(sc->drm_dev)) {
 		sc->force_bit_dev = true;
 	}
+#endif
 
 	/* add bus interface device */
 	sc->iic_dev = device_add_child(idev, "iicbus", -1);
@@ -496,33 +613,25 @@
 static int
 intel_iicbb_attach(device_t idev)
 {
-	static const int map_pin_to_reg[] = {
-		0,
-		GPIOB,
-		GPIOA,
-		GPIOC,
-		GPIOD,
-		GPIOE,
-		0,
-		GPIOF
-	};
-
 	struct intel_iic_softc *sc;
 	struct drm_i915_private *dev_priv;
-	int pin;
+	int pin, port;
 
 	sc = device_get_softc(idev);
 	sc->drm_dev = device_get_softc(device_get_parent(idev));
 	dev_priv = sc->drm_dev->dev_private;
 	pin = device_get_unit(idev);
+	port = pin + 1;
 
-	snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", gpio_names[pin]);
+	snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s",
+	    intel_gmbus_is_port_valid(port) ? gmbus_ports[pin].name :
+	    "reserved");
 	device_set_desc(idev, sc->name);
 
+	if (!intel_gmbus_is_port_valid(port))
+		pin = 1; /* GPIOA, VGA */
 	sc->reg0 = pin | GMBUS_RATE_100KHZ;
-	sc->reg = map_pin_to_reg[pin];
-	if (HAS_PCH_SPLIT(dev_priv->dev))
-		sc->reg += PCH_GPIOA - GPIOA;
+	sc->reg = dev_priv->gpio_mmio_base + gmbus_ports[pin].reg;
 
 	/* add generic bit-banging code */
 	sc->iic_dev = device_add_child(idev, "iicbb", -1);
@@ -530,6 +639,7 @@
 		return (ENXIO);
 	device_quiet(sc->iic_dev);
 	bus_generic_attach(idev);
+	iicbus_set_nostop(idev, true);
 
 	return (0);
 }
@@ -580,6 +690,8 @@
 	DEVMETHOD(iicbb_setscl,		intel_iicbb_setscl),
 	DEVMETHOD(iicbb_getsda,		intel_iicbb_getsda),
 	DEVMETHOD(iicbb_getscl,		intel_iicbb_getscl),
+	DEVMETHOD(iicbb_pre_xfer,	intel_iicbb_pre_xfer),
+	DEVMETHOD(iicbb_post_xfer,	intel_iicbb_post_xfer),
 	DEVMETHOD_END
 };
 static driver_t intel_iicbb_driver = {
@@ -601,14 +713,10 @@
 
 	dev_priv = dev->dev_private;
 	sx_init(&dev_priv->gmbus_sx, "gmbus");
-	dev_priv->gmbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-	dev_priv->bbbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-	dev_priv->gmbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-	dev_priv->bbbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+	if (HAS_PCH_SPLIT(dev))
+		dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+	else
+		dev_priv->gpio_mmio_base = 0;
 
 	/*
 	 * Giant here is most likely recursed.  Normally, the
@@ -616,21 +724,21 @@
 	 * driver.
 	 */
 	mtx_lock(&Giant);
-	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+	for (i = 0; i <= GMBUS_NUM_PORTS; i++) {
 		/*
 		 * Initialize bbbus_bridge before gmbus_bridge, since
 		 * gmbus may decide to force quirk transfer in the
 		 * attachment code.
 		 */
-		dev_priv->bbbus_bridge[i] = device_add_child(dev->device,
+		dev_priv->bbbus_bridge[i] = device_add_child(dev->dev,
 		    "intel_iicbb", i);
 		if (dev_priv->bbbus_bridge[i] == NULL) {
 			DRM_ERROR("bbbus bridge %d creation failed\n", i);
-			ret = ENXIO;
+			ret = -ENXIO;
 			goto err;
 		}
 		device_quiet(dev_priv->bbbus_bridge[i]);
-		ret = device_probe_and_attach(dev_priv->bbbus_bridge[i]);
+		ret = -device_probe_and_attach(dev_priv->bbbus_bridge[i]);
 		if (ret != 0) {
 			DRM_ERROR("bbbus bridge %d attach failed, %d\n", i,
 			    ret);
@@ -652,19 +760,19 @@
 
 		dev_priv->bbbus[i] = iic_dev;
 
-		dev_priv->gmbus_bridge[i] = device_add_child(dev->device,
+		dev_priv->gmbus_bridge[i] = device_add_child(dev->dev,
 		    "intel_gmbus", i);
 		if (dev_priv->gmbus_bridge[i] == NULL) {
 			DRM_ERROR("gmbus bridge %d creation failed\n", i);
-			ret = ENXIO;
+			ret = -ENXIO;
 			goto err;
 		}
 		device_quiet(dev_priv->gmbus_bridge[i]);
-		ret = device_probe_and_attach(dev_priv->gmbus_bridge[i]);
+		ret = -device_probe_and_attach(dev_priv->gmbus_bridge[i]);
 		if (ret != 0) {
 			DRM_ERROR("gmbus bridge %d attach failed, %d\n", i,
 			    ret);
-			ret = ENXIO;
+			ret = -ENXIO;
 			goto err;
 		}
 
@@ -695,14 +803,6 @@
 
 	dev_priv = dev->dev_private;
 
-	free(dev_priv->gmbus, DRM_MEM_DRIVER);
-	dev_priv->gmbus = NULL;
-	free(dev_priv->bbbus, DRM_MEM_DRIVER);
-	dev_priv->bbbus = NULL;
-	free(dev_priv->gmbus_bridge, DRM_MEM_DRIVER);
-	dev_priv->gmbus_bridge = NULL;
-	free(dev_priv->bbbus_bridge, DRM_MEM_DRIVER);
-	dev_priv->bbbus_bridge = NULL;
 	sx_destroy(&dev_priv->gmbus_sx);
 }
 

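The intel_iic.c rework above replaces the old open-coded quirk-transfer wrapper with iicbb pre/post transfer hooks, plus iicbus_set_nostop() instead of per-message IIC_M_NOSTOP fixups. A minimal sketch of how the generic bit-banging layer is expected to call the new DEVMETHODs around a transfer (illustrative only, assuming FreeBSD 10's iicbb(4) interface; not code from this commit):

    static int
    iicbb_transfer_sketch(device_t bus, struct iic_msg *msgs, uint32_t nmsgs)
    {
    	int error;

    	/* "bus" is the iicbb child; its parent is the intel_iicbb bridge */
    	IICBB_PRE_XFER(device_get_parent(bus));	/* set up GPIO pins, quirks */
    	error = iicbus_transfer_gen(bus, msgs, nmsgs);
    	IICBB_POST_XFER(device_get_parent(bus));	/* release SDA/SCL */
    	return (error);
    }
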
Modified: trunk/sys/dev/drm2/i915/intel_lvds.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_lvds.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_lvds.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2007 Intel Corporation
  * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_lvds.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_lvds.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -230,7 +231,7 @@
 }
 
 static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -482,7 +483,7 @@
 
 static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
 {
-	DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+	DRM_INFO("Skipping forced modeset for %s\n", id->ident);
 	return 1;
 }
 
@@ -638,7 +639,7 @@
 
 static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
 {
-	DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+	DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
 	return 1;
 }
 
@@ -861,8 +862,8 @@
 		    child->device_type != DEVICE_TYPE_LFP)
 			continue;
 
-		if (child->i2c_pin)
-		    *i2c_pin = child->i2c_pin;
+		if (intel_gmbus_is_port_valid(child->i2c_pin))
+			*i2c_pin = child->i2c_pin;
 
 		/* However, we cannot trust the BIOS writers to populate
 		 * the VBT correctly.  Since LVDS requires additional
@@ -978,7 +979,7 @@
 	 * the initial panel fitting mode will be FULL_SCREEN.
 	 */
 
-	drm_connector_attach_property(&intel_connector->base,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
 	intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
@@ -996,7 +997,8 @@
 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
 	 * preferred mode is the right one.
 	 */
-	intel_lvds->edid = drm_get_edid(connector, dev_priv->gmbus[pin]);
+	intel_lvds->edid = drm_get_edid(connector,
+					intel_gmbus_get_adapter(dev_priv, pin));
 	if (intel_lvds->edid) {
 		if (drm_add_edid_modes(connector,
 				       intel_lvds->edid)) {

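intel_lvds.c now validates the VBT-supplied DDC pin with intel_gmbus_is_port_valid() rather than accepting any non-zero value, and fetches the EDID adapter through intel_gmbus_get_adapter(). The validity check amounts to a 1-based range test over the gmbus_ports table; a sketch under that assumption (the real macro lives in the i915 headers):

    static inline bool
    intel_gmbus_is_port_valid(unsigned port)
    {
    	/* 0 is "disabled" and 7 is "reserved"; only ports 1..6 map to pins */
    	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
    }
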
Modified: trunk/sys/dev/drm2/i915/intel_modes.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_modes.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_modes.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
  * Copyright (c) 2007, 2010 Intel Corporation
@@ -24,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_modes.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_modes.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -45,13 +46,13 @@
 	u8 buf[2];
 	struct iic_msg msgs[] = {
 		{
-			.slave = DDC_ADDR,
+			.slave = DDC_ADDR << 1,
 			.flags = IIC_M_WR,
 			.len = 1,
 			.buf = out_buf,
 		},
 		{
-			.slave = DDC_ADDR,
+			.slave = DDC_ADDR << 1,
 			.flags = IIC_M_RD,
 			.len = 1,
 			.buf = buf,
@@ -58,8 +59,8 @@
 		}
 	};
 
-	return (iicbus_transfer(dev_priv->gmbus[ddc_bus], msgs, 2)
-	    == 0/* XXXKIB  2*/);
+	return (iicbus_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
+	    msgs, 2) == 0/* XXXKIB  2*/);
 }
 
 /**
@@ -80,7 +81,6 @@
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 		drm_edid_to_eld(connector, edid);
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	}
 
@@ -106,13 +106,13 @@
 		prop = drm_property_create_enum(dev, 0,
 					   "audio",
 					   force_audio_names,
-					   DRM_ARRAY_SIZE(force_audio_names));
+					   ARRAY_SIZE(force_audio_names));
 		if (prop == NULL)
 			return;
 
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -132,7 +132,7 @@
 		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
 		    "Broadcast RGB",
 		    broadcast_rgb_names,
-		    DRM_ARRAY_SIZE(broadcast_rgb_names));
+		    ARRAY_SIZE(broadcast_rgb_names));
 		if (prop == NULL)
 			return;
 
@@ -139,5 +139,5 @@
 		dev_priv->broadcast_rgb_property = prop;
 	}
 
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }

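The recurring "<< 1" on the .slave fields above is a 7-bit to 8-bit address conversion: FreeBSD's iicbus stores slave addresses left-justified, with bit 0 reserved for the read/write flag, so the shift that intel_iic.c used to apply on every transfer is now baked into the messages themselves. A worked example:

    uint16_t addr7 = DDC_ADDR;      /* 0x50, the 7-bit DDC/EDID address */
    uint16_t addr8 = DDC_ADDR << 1; /* 0xa0, as stored in iic_msg.slave */
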
Modified: trunk/sys/dev/drm2/i915/intel_opregion.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_opregion.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_opregion.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright 2008 Intel Corporation <hong.liu at intel.com>
  * Copyright 2008 Red Hat <mjg at redhat.com>
@@ -26,12 +27,15 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_opregion.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_opregion.c 295623 2016-02-15 07:35:40Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/i915/i915_drm.h>
 #include <dev/drm2/i915/i915_drv.h>
 #include <dev/drm2/i915/intel_drv.h>
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
 
 #define PCI_ASLE 0xe4
 #define PCI_ASLS 0xfc
@@ -144,7 +148,7 @@
 #define ACPI_DIGITAL_OUTPUT (3<<8)
 #define ACPI_LVDS_OUTPUT (4<<8)
 
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI)
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -289,6 +293,7 @@
 
 static struct intel_opregion *system_opregion;
 
+#if 0
 static int intel_opregion_video_event(struct notifier_block *nb,
 				      unsigned long val, void *data)
 {
@@ -319,6 +324,7 @@
 static struct notifier_block intel_opregion_notifier = {
 	.notifier_call = intel_opregion_video_event,
 };
+#endif
 
 /*
  * Initialise the DIDL field in opregion. This passes a list of devices to
@@ -326,24 +332,42 @@
  * (version 3)
  */
 
+static int
+acpi_is_video_device(ACPI_HANDLE devh)
+{
+	ACPI_HANDLE h;
+
+	if (ACPI_FAILURE(AcpiGetHandle(devh, "_DOD", &h)) ||
+	    ACPI_FAILURE(AcpiGetHandle(devh, "_DOS", &h)))
+		return 0;
+	return 1;
+}
+
 static void intel_didl_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct drm_connector *connector;
-	acpi_handle handle;
-	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
-	unsigned long long device_id;
-	acpi_status status;
+	u32 device_id;
+	ACPI_HANDLE handle, acpi_video_bus, acpi_cdev;
+	ACPI_STATUS status;
 	int i = 0;
 
-	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
-	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+	handle = acpi_get_handle(dev->dev);
+	if (!handle)
 		return;
 
-	if (acpi_is_video_device(acpi_dev))
-		acpi_video_bus = acpi_dev;
+	if (acpi_is_video_device(handle))
+		acpi_video_bus = handle;
 	else {
+		acpi_cdev = NULL;
+		acpi_video_bus = NULL;
+		while (AcpiGetNextObject(ACPI_TYPE_DEVICE, handle, acpi_cdev,
+					&acpi_cdev) != AE_NOT_FOUND) {
+			if (acpi_is_video_device(acpi_cdev)) {
+				acpi_video_bus = acpi_cdev;
+				break;
+			}
+		}
+#if 0
 		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
 			if (acpi_is_video_device(acpi_cdev)) {
 				acpi_video_bus = acpi_cdev;
@@ -350,13 +374,30 @@
 				break;
 			}
 		}
+#endif
 	}
 
 	if (!acpi_video_bus) {
-		printk(KERN_WARNING "No ACPI video bus found\n");
+		device_printf(dev->dev, "No ACPI video bus found\n");
 		return;
 	}
 
+	acpi_cdev = NULL;
+	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpi_video_bus, acpi_cdev,
+				&acpi_cdev) != AE_NOT_FOUND) {
+		if (i >= 8) {
+			device_printf(dev->dev, "More than 8 outputs detected\n");
+			return;
+		}
+		status = acpi_GetInteger(acpi_cdev, "_ADR", &device_id);
+		if (ACPI_SUCCESS(status)) {
+			if (!device_id)
+				goto blind_set;
+			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+			i++;
+		}
+	}
+#if 0
 	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
 		if (i >= 8) {
 			dev_printk(KERN_ERR, &dev->pdev->dev,
@@ -373,6 +414,7 @@
 			i++;
 		}
 	}
+#endif
 
 end:
 	/* If fewer than 8 outputs, the list must be null terminated */
@@ -385,7 +427,7 @@
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
 		if (i >= 8) {
-			device_printf(dev->device,
+			device_printf(dev->dev,
 				    "More than 8 outputs detected\n");
 			return;
 		}
@@ -417,6 +459,25 @@
 	goto end;
 }
 
+static void intel_setup_cadls(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int i = 0;
+	u32 disp_id;
+
+	/* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically this is not always correct, as display outputs may
+	 * exist but not be active. This initialization is necessary for
+	 * some Clevo laptops that check this field before processing the
+	 * brightness and display switching hotkeys. Just like DIDL, CADL
+	 * is NULL-terminated if there are fewer than eight devices. */
+	do {
+		disp_id = opregion->acpi->didl[i];
+		opregion->acpi->cadl[i] = disp_id;
+	} while (++i < 8 && disp_id != 0);
+}
+
 void intel_opregion_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -426,8 +487,10 @@
 		return;
 
 	if (opregion->acpi) {
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
+		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 			intel_didl_outputs(dev);
+			intel_setup_cadls(dev);
+		}
 
 		/* Notify BIOS we are ready to handle ACPI video ext notifs.
 		 * Right now, all the events are handled by the ACPI video module.
@@ -436,7 +499,9 @@
 		opregion->acpi->drdy = 1;
 
 		system_opregion = opregion;
+#if 0
 		register_acpi_notifier(&intel_opregion_notifier);
+#endif
 	}
 
 	if (opregion->asle)
@@ -455,11 +520,13 @@
 		opregion->acpi->drdy = 0;
 
 		system_opregion = NULL;
+#if 0
 		unregister_acpi_notifier(&intel_opregion_notifier);
+#endif
 	}
 
 	/* just clear all opregion memory pointers now */
-	iounmap(opregion->header);
+	pmap_unmapdev((vm_offset_t)opregion->header, OPREGION_SIZE);
 	opregion->header = NULL;
 	opregion->acpi = NULL;
 	opregion->swsci = NULL;
@@ -467,11 +534,9 @@
 	opregion->vbt = NULL;
 }
 #else
-int
+void
 intel_opregion_init(struct drm_device *dev)
 {
-
-	return (0);
 }
 
 void
@@ -503,7 +568,7 @@
 	u32 asls, mboxes;
 	int err = 0;
 
-	asls = pci_read_config(dev->device, PCI_ASLS, 4);
+	asls = pci_read_config(dev->dev, PCI_ASLS, 4);
 	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
 		DRM_DEBUG("ACPI OpRegion not supported!\n");

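intel_opregion.c swaps the Linux acpi_bus child-list iteration for ACPICA's AcpiGetNextObject() walk. The pattern in isolation (a sketch; "parent" stands for any ACPI device handle):

    ACPI_HANDLE child = NULL;

    /* NULL starts the walk; passing the previous child yields its sibling */
    while (AcpiGetNextObject(ACPI_TYPE_DEVICE, parent, child, &child) !=
        AE_NOT_FOUND) {
    	if (acpi_is_video_device(child))
    		break;		/* found the video bus */
    }
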
Modified: trunk/sys/dev/drm2/i915/intel_overlay.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_overlay.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_overlay.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2009
  *
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_overlay.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_overlay.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -219,10 +220,11 @@
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
 	KASSERT(!overlay->last_flip_req, ("Overlay already has flip req"));
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 		free(request, DRM_I915_GEM);
 		return ret;
@@ -229,10 +231,10 @@
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	overlay->last_flip_req = 0;
 	return 0;
@@ -266,7 +268,7 @@
 	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
 
 	mode = drm_mode_duplicate(dev, &vesa_640x480);
-	drm_mode_set_crtcinfo(mode, 0);
+
 	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
 				       crtc->base.x, crtc->base.y,
 				       crtc->base.fb))
@@ -291,6 +293,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -306,17 +309,17 @@
 
 	request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret) {
 		free(request, DRM_I915_GEM);
 		goto out;
 	}
 
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	OUT_RING(overlay->flip_addr | OFC_UPDATE);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	OUT_RING(MI_NOOP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
@@ -332,6 +335,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -349,16 +353,16 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = BEGIN_LP_RING(2);
+	ret = intel_ring_begin(ring, 2);
 	if (ret) {
 		free(request, DRM_I915_GEM);
 		return ret;
 	}
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 		free(request, DRM_I915_GEM);
 		return ret;
@@ -399,6 +403,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
 	int ret;
@@ -413,20 +418,20 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		free(request, DRM_I915_GEM);
 		return ret;
 	}
 	/* wait for overlay to go idle */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, request,
 					     intel_overlay_off_tail);
@@ -438,15 +443,16 @@
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	if (overlay->flip_tail)
 		overlay->flip_tail(overlay);
@@ -463,6 +469,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 	int ret;
 
 	/* Only wait if there is actually an old frame to release to
@@ -477,15 +484,15 @@
 		/* synchronous slowpath */
 		request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
 
-		ret = BEGIN_LP_RING(2);
+		ret = intel_ring_begin(ring, 2);
 		if (ret) {
 			free(request, DRM_I915_GEM);
 			return ret;
 		}
 
-		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		ret = intel_overlay_do_wait_request(overlay, request,
 						    intel_overlay_release_old_vid_tail);
@@ -764,10 +771,10 @@
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
+	u32 swidth, swidthsw, sheight, ostride;
 
 	KASSERT(overlay != NULL, ("No overlay ?"));
 	DRM_LOCK_ASSERT(overlay->dev);
-	DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
 
 	ret = intel_overlay_release_old_vid(overlay);
 	if (ret != 0)
@@ -782,16 +789,18 @@
 		goto out_unpin;
 
 	if (!overlay->active) {
+		u32 oconfig;
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
 			ret = -ENOMEM;
 			goto out_unpin;
 		}
-		regs->OCONFIG = OCONF_CC_OUT_8BIT;
+		oconfig = OCONF_CC_OUT_8BIT;
 		if (IS_GEN4(overlay->dev))
-			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
-		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+			oconfig |= OCONF_CSC_MODE_BT709;
+		oconfig |= overlay->crtc->pipe == 0 ?
 			OCONF_PIPE_A : OCONF_PIPE_B;
+		regs->OCONFIG = oconfig;
 		intel_overlay_unmap_regs(overlay, regs);
 
 		ret = intel_overlay_on(overlay);
@@ -813,29 +822,33 @@
 	else
 		tmp_width = params->src_w;
 
-	regs->SWIDTH = params->src_w;
-	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
-				       params->offset_Y, tmp_width);
-	regs->SHEIGHT = params->src_h;
+	swidth = params->src_w;
+	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+	sheight = params->src_h;
 	regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
-	regs->OSTRIDE = params->stride_Y;
+	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
 		int uv_hscale = uv_hsubsampling(params->format);
 		int uv_vscale = uv_vsubsampling(params->format);
 		u32 tmp_U, tmp_V;
-		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+		swidth |= (params->src_w/uv_hscale) << 16;
 		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
 				      params->src_w/uv_hscale);
 		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
 				      params->src_w/uv_hscale);
-		regs->SWIDTHSW |= max_u32(tmp_U, tmp_V) << 16;
-		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+		swidthsw |= max_u32(tmp_U, tmp_V) << 16;
+		sheight |= (params->src_h/uv_vscale) << 16;
 		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
 		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
-		regs->OSTRIDE |= params->stride_UV << 16;
+		ostride |= params->stride_UV << 16;
 	}
 
+	regs->SWIDTH = swidth;
+	regs->SWIDTHSW = swidthsw;
+	regs->SHEIGHT = sheight;
+	regs->OSTRIDE = ostride;
+
 	scale_changed = update_scaling_factors(overlay, regs, params);
 
 	update_colorkey(overlay, regs);
@@ -864,7 +877,6 @@
 	int ret;
 
 	DRM_LOCK_ASSERT(overlay->dev);
-	DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
 
 	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
@@ -1108,11 +1120,7 @@
 	struct put_image_params *params;
 	int ret;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
 	overlay = dev_priv->overlay;
 	if (!overlay) {
 		DRM_DEBUG("userspace bug: no overlay\n");
@@ -1307,11 +1315,7 @@
 	struct overlay_registers *regs;
 	int ret;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
 	overlay = dev_priv->overlay;
 	if (!overlay) {
 		DRM_DEBUG("userspace bug: no overlay\n");
@@ -1453,8 +1457,8 @@
 	intel_overlay_unmap_regs(overlay, regs);
 
 	dev_priv->overlay = overlay;
+	DRM_UNLOCK(dev);
 	DRM_INFO("initialized overlay support\n");
-	DRM_UNLOCK(dev);
 	return;
 
 out_unpin_bo:

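intel_overlay.c drops the legacy LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros in favor of explicit per-ring operations. Every emit sequence in the hunks above follows the same reserve/emit/advance shape, e.g.:

    ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
    if (ret)
    	return ret;
    intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);		/* publish the new tail pointer */
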
Modified: trunk/sys/dev/drm2/i915/intel_panel.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_panel.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_panel.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2010 Intel Corporation
  * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
@@ -29,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_panel.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_panel.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -59,7 +60,7 @@
 void
 intel_pch_panel_fitting(struct drm_device *dev,
 			int fitting_mode,
-			struct drm_display_mode *mode,
+			const struct drm_display_mode *mode,
 			struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,6 +198,20 @@
 	return max;
 }
 
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (i915_panel_invert_brightness < 0)
+		return val;
+
+	if (i915_panel_invert_brightness > 0 ||
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+		return intel_panel_get_max_backlight(dev) - val;
+
+	return val;
+}
+
 u32 intel_panel_get_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -212,12 +227,13 @@
 		if (is_backlight_combination_mode(dev)) {
 			u8 lbpc;
 
-			lbpc = pci_read_config(dev->device, PCI_LBPC, 1);
+			lbpc = pci_read_config(dev->dev, PCI_LBPC, 1);
 			val *= lbpc;
 		}
 	}
 
-	DRM_DEBUG("get backlight PWM = %d\n", val);
+	val = intel_panel_compute_brightness(dev, val);
+	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	return val;
 }
 
@@ -233,7 +249,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	DRM_DEBUG("set backlight PWM = %d\n", level);
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+	level = intel_panel_compute_brightness(dev, level);
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
@@ -244,7 +261,7 @@
 
 		lbpc = level * 0xfe / max + 1;
 		level /= lbpc;
-		pci_write_config(dev->device, PCI_LBPC, lbpc, 4);
+		pci_write_config(dev->dev, PCI_LBPC, lbpc, 4);
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);

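intel_panel.c gains intel_panel_compute_brightness(), which mirrors the PWM scale on machines whose backlight wiring is inverted (i915_panel_invert_brightness or QUIRK_INVERT_BRIGHTNESS). A worked example with assumed values: if the maximum PWM count is 255 and a level of 200 is requested, the hardware is programmed with

    level = intel_panel_get_max_backlight(dev) - 200;	/* 255 - 200 = 55 */

and the same transform is applied on the read-back path, so the inversion stays invisible to callers.
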
Modified: trunk/sys/dev/drm2/i915/intel_ringbuffer.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_ringbuffer.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_ringbuffer.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2008-2010 Intel Corporation
  *
@@ -28,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_ringbuffer.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_ringbuffer.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -52,12 +53,14 @@
 void
 i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
 {
+	struct drm_i915_private *dev_priv;
 
 	if (ring->trace_irq_seqno == 0) {
-		mtx_lock(&ring->irq_lock);
+		dev_priv = ring->dev->dev_private;
+		mtx_lock(&dev_priv->irq_lock);
 		if (ring->irq_get(ring))
 			ring->trace_irq_seqno = seqno;
-		mtx_unlock(&ring->irq_lock);
+		mtx_unlock(&dev_priv->irq_lock);
 	}
 }
 
@@ -70,10 +73,36 @@
 }
 
 static int
-render_ring_flush(struct intel_ring_buffer *ring,
-		  uint32_t	invalidate_domains,
-		  uint32_t	flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32	invalidate_domains,
+		       u32	flush_domains)
 {
+	u32 cmd;
+	int ret;
+
+	cmd = MI_FLUSH;
+	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+		cmd |= MI_NO_WRITE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+		cmd |= MI_READ_FLUSH;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+		  u32	invalidate_domains,
+		  u32	flush_domains)
+{
 	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
@@ -107,17 +136,8 @@
 	 */
 
 	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) &
-	    I915_GEM_DOMAIN_RENDER)
+	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
 		cmd &= ~MI_NO_WRITE_FLUSH;
-	if (INTEL_INFO(dev)->gen < 4) {
-		/*
-		 * On the 965, the sampler cache always gets flushed
-		 * and this bit is reserved.
-		 */
-		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-			cmd |= MI_READ_FLUSH;
-	}
 	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 		cmd |= MI_EXE_FLUSH;
 
@@ -361,14 +381,12 @@
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page = (uint32_t *)kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+	pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
-	pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
 	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
 
 	pc->obj = obj;
 	ring->private = pc;
@@ -394,9 +412,7 @@
 
 	obj = pc->obj;
 	pmap_qremove((vm_offset_t)pc->cpu_page, 1);
-	pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
-	kmem_free(kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
+	kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
@@ -411,12 +427,11 @@
 	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		I915_WRITE(MI_MODE, mode);
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 		if (IS_GEN7(dev))
 			I915_WRITE(GFX_MODE_GEN7,
-				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 	}
 
 	if (INTEL_INFO(dev)->gen >= 5) {
@@ -433,14 +448,19 @@
 		 *  policy is not supported."
 		 */
 		I915_WRITE(CACHE_MODE_0,
-			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
-	}
+			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		I915_WRITE(INSTPM,
-			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+		/* This is not explicitly set for GEN6, so read the register.
+		 * See intel_ring_mi_set_context() for why we care.
+		 * TODO: consider explicitly setting the bit for GEN5
+		 */
+		ring->itlb_before_ctx_switch =
+			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
 	}
 
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
 	return ret;
 }
 
@@ -510,10 +530,9 @@
  * @seqno - seqno which the waiter will block on
  */
 static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
-		struct intel_ring_buffer *signaller,
-		int ring,
-		u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+	       struct intel_ring_buffer *signaller,
+	       u32 seqno)
 {
 	int ret;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
@@ -520,11 +539,23 @@
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	seqno -= 1;
+
+	if (signaller->semaphore_register[waiter->id] ==
+	    MI_SEMAPHORE_SYNC_INVALID)
+		printf("gen6_ring_sync semaphore_register %d invalid\n",
+		    waiter->id);
+
 	ret = intel_ring_begin(waiter, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+	intel_ring_emit(waiter,
+			dw1 | signaller->semaphore_register[waiter->id]);
 	intel_ring_emit(waiter, seqno);
 	intel_ring_emit(waiter, 0);
 	intel_ring_emit(waiter, MI_NOOP);
@@ -540,48 +571,6 @@
 int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
     struct intel_ring_buffer *signaller, u32 seqno);
 
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
-		    struct intel_ring_buffer *signaller,
-		    u32 seqno)
-{
-	KASSERT(signaller->semaphore_register[RCS] != MI_SEMAPHORE_SYNC_INVALID,
-	    ("valid RCS semaphore"));
-	return intel_ring_sync(waiter,
-			       signaller,
-			       RCS,
-			       seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
-		      struct intel_ring_buffer *signaller,
-		      u32 seqno)
-{
-	KASSERT(signaller->semaphore_register[VCS] != MI_SEMAPHORE_SYNC_INVALID,
-	    ("Valid VCS semaphore"));
-	return intel_ring_sync(waiter,
-			       signaller,
-			       VCS,
-			       seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
-		      struct intel_ring_buffer *signaller,
-		      u32 seqno)
-{
-	KASSERT(signaller->semaphore_register[BCS] != MI_SEMAPHORE_SYNC_INVALID,
-	    ("Valid BCS semaphore"));
-	return intel_ring_sync(waiter,
-			       signaller,
-			       BCS,
-			       seqno);
-}
-
 #define PIPE_CONTROL_FLUSH(ring__, addr__)					\
 do {									\
 	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
@@ -642,28 +631,7 @@
 	return 0;
 }
 
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
-			uint32_t *result)
-{
-	u32 seqno = i915_gem_next_request_seqno(ring);
-	int ret;
-
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	*result = seqno;
-	return 0;
-}
-
- static u32
+static u32
 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -694,40 +662,74 @@
 		return (-1);
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->gt_irq_mask &= ~mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+
+	return true;
 }
 
 static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->gt_irq_mask |= mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
 }
 
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->irq_mask &= ~mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
-	POSTING_READ(IMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+
+	return true;
 }
 
 static void
-i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->irq_mask |= mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
-	POSTING_READ(IMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -735,13 +737,11 @@
 	if (!dev->irq_enabled)
 		return false;
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_irq(dev_priv,
-					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
 	}
 
 	return true;
@@ -748,19 +748,16 @@
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_irq(dev_priv,
-					     GT_USER_INTERRUPT |
-					     GT_PIPE_NOTIFY);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
 	}
 }
 
@@ -813,10 +810,10 @@
 }
 
 static int
-ring_add_request(struct intel_ring_buffer *ring,
-		 uint32_t *result)
+i9xx_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
 {
-	uint32_t seqno;
+	u32 seqno;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -836,7 +833,7 @@
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -846,11 +843,12 @@
 
 	gen6_gt_force_wake_get(dev_priv);
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
 	}
 
 	return true;
@@ -857,69 +855,55 @@
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
+	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		I915_WRITE_IMR(ring, ~0);
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
 	}
 
 	gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
 
-	if (!dev->irq_enabled)
-		return false;
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
-	if (ring->irq_refcount++ == 0) {
-		if (IS_G4X(dev))
-			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			MI_BATCH_GTT |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
-	return true;
+	return 0;
 }
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	mtx_assert(&ring->irq_lock, MA_OWNED);
-	if (--ring->irq_refcount == 0) {
-		if (IS_G4X(dev))
-			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-}
-
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset,
-    uint32_t length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
 {
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START | (2 << 6) |
-			MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(ring, offset);
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -926,37 +910,17 @@
 }
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				uint32_t offset, uint32_t len)
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
 {
-	struct drm_device *dev = ring->dev;
 	int ret;
 
-	if (IS_I830(dev) || IS_845G(dev)) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, 0);
-	} else {
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		if (INTEL_INFO(dev)->gen >= 4) {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6) |
-					MI_BATCH_NON_SECURE_I965);
-			intel_ring_emit(ring, offset);
-		} else {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6));
-			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		}
-	}
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -964,7 +928,6 @@
 
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
@@ -972,22 +935,16 @@
 		return;
 
 	pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
-	pmap_invalidate_range(kernel_pmap,
-	    (vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
-	kmem_free(kernel_map, (vm_offset_t)ring->status_page.page_addr,
+	kva_free((vm_offset_t)ring->status_page.page_addr,
 	    PAGE_SIZE);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
-
-	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -1006,19 +963,14 @@
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = (void *)kmem_alloc_nofault(kernel_map,
-	    PAGE_SIZE);
+	ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
 	if (ring->status_page.page_addr == NULL) {
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
 	}
 	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
 	    1);
-	pmap_invalidate_range(kernel_pmap,
-	    (vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
 	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1036,8 +988,7 @@
 	return ret;
 }
 
-static
-int intel_init_ring_buffer(struct drm_device *dev,
+static int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
@@ -1047,10 +998,8 @@
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
+	ring->size = 32 * PAGE_SIZE;
 
-	mtx_init(&ring->irq_lock, "ringb", NULL, MTX_DEF);
-	ring->irq_mask = ~0;
-
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -1070,20 +1019,15 @@
 	if (ret)
 		goto err_unref;
 
-	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.virtual == NULL) {
+	ring->virtual_start = pmap_mapdev_attr(
+	    dev->agp->base + obj->gtt_offset, ring->size,
+	    VM_MEMATTR_WRITE_COMBINING);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->virtual_start = ring->map.virtual;
 	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
@@ -1099,7 +1043,7 @@
 	return 0;
 
 err_unmap:
-	drm_core_ioremapfree(&ring->map, dev);
+	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
@@ -1123,7 +1067,7 @@
 	ret = intel_wait_ring_idle(ring);
 	I915_WRITE_CTL(ring, 0);
 
-	drm_core_ioremapfree(&ring->map, ring->dev);
+	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
 
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
@@ -1137,7 +1081,7 @@
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-	unsigned int *virt;
+	uint32_t *virt;
 	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
@@ -1146,12 +1090,10 @@
 			return ret;
 	}
 
-	virt = (unsigned int *)((char *)ring->virtual_start + ring->tail);
-	rem /= 8;
-	while (rem--) {
+	virt = (uint32_t *)((char *)ring->virtual_start + ring->tail);
+	rem /= 4;
+	while (rem--)
 		*virt++ = MI_NOOP;
-		*virt++ = MI_NOOP;
-	}
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
@@ -1172,9 +1114,11 @@
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	ret = i915_wait_request(ring, seqno, true);
+	ret = i915_wait_request(ring, seqno);
 
 	dev_priv->mm.interruptible = was_interruptible;
+	if (!ret)
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }
@@ -1248,15 +1192,13 @@
 		return ret;
 
 	CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
-	if (drm_core_check_feature(dev, DRIVER_GEM))
-		/* With GEM the hangcheck timer should kick us out of the loop,
-		 * leaving it early runs the risk of corrupting GEM state (due
-		 * to running on almost untested codepaths). But on resume
-		 * timers don't work yet, so prevent a complete hang in that
-		 * case by choosing an insanely large timeout. */
-		end = ticks + hz * 60;
-	else
-		end = ticks + hz * 3;
+	/* With GEM the hangcheck timer should kick us out of the loop,
+	 * leaving it early runs the risk of corrupting GEM state (due
+	 * to running on almost untested codepaths). But on resume
+	 * timers don't work yet, so prevent a complete hang in that
+	 * case by choosing an insanely large timeout. */
+	end = ticks + hz * 60;
+
 	do {
 		ring->head = I915_READ_HEAD(ring);
 		ring->space = ring_space(ring);
@@ -1265,16 +1207,11 @@
 			return 0;
 		}
 
-#if 0
 		if (dev->primary->master) {
 			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 			if (master_priv->sarea_priv)
 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 		}
-#else
-		if (dev_priv->sarea_priv)
-			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-#endif
 
 		pause("915rng", 1);
 		if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
@@ -1314,51 +1251,17 @@
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
 	ring->tail &= ring->size - 1;
+	if (dev_priv->stop_rings & intel_ring_flag(ring))
+		return;
 	ring->write_tail(ring, ring->tail);
 }
 
-static const struct intel_ring_buffer render_ring = {
-	.name			= "render ring",
-	.id			= RCS,
-	.mmio_base		= RENDER_RING_BASE,
-	.size			= 32 * PAGE_SIZE,
-	.init			= init_render_ring,
-	.write_tail		= ring_write_tail,
-	.flush			= render_ring_flush,
-	.add_request		= render_ring_add_request,
-	.get_seqno		= ring_get_seqno,
-	.irq_get		= render_ring_get_irq,
-	.irq_put		= render_ring_put_irq,
-	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
-	.cleanup		= render_ring_cleanup,
-	.sync_to		= render_ring_sync_to,
-	.semaphore_register	= {MI_SEMAPHORE_SYNC_INVALID,
-				   MI_SEMAPHORE_SYNC_RV,
-				   MI_SEMAPHORE_SYNC_RB},
-	.signal_mbox		= {GEN6_VRSYNC, GEN6_BRSYNC},
-};
 
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
-	.name                   = "bsd ring",
-	.id			= VCS,
-	.mmio_base		= BSD_RING_BASE,
-	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
-	.write_tail		= ring_write_tail,
-	.flush			= bsd_ring_flush,
-	.add_request		= ring_add_request,
-	.get_seqno		= ring_get_seqno,
-	.irq_get		= bsd_ring_get_irq,
-	.irq_put		= bsd_ring_put_irq,
-	.dispatch_execbuffer	= ring_dispatch_execbuffer,
-};
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
-				     uint32_t value)
+				     u32 value)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
@@ -1419,81 +1322,12 @@
 	return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
-	.name			= "gen6 bsd ring",
-	.id			= VCS,
-	.mmio_base		= GEN6_BSD_RING_BASE,
-	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
-	.write_tail		= gen6_bsd_ring_write_tail,
-	.flush			= gen6_ring_flush,
-	.add_request		= gen6_add_request,
-	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= gen6_bsd_ring_get_irq,
-	.irq_put		= gen6_bsd_ring_put_irq,
-	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
-	.sync_to		= gen6_bsd_ring_sync_to,
-	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
-				   MI_SEMAPHORE_SYNC_INVALID,
-				   MI_SEMAPHORE_SYNC_VB},
-	.signal_mbox		= {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_BLT_USER_INTERRUPT,
-				 GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	gen6_ring_put_irq(ring,
-			  GT_BLT_USER_INTERRUPT,
-			  GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			  uint32_t invalidate, uint32_t flush)
+			  u32 invalidate, u32 flush)
 {
-	uint32_t cmd;
+	u32 cmd;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -1511,43 +1345,64 @@
 	return 0;
 }
 
-static const struct intel_ring_buffer gen6_blt_ring = {
-	.name			= "blt ring",
-	.id			= BCS,
-	.mmio_base		= BLT_RING_BASE,
-	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
-	.write_tail		= ring_write_tail,
-	.flush			= blt_ring_flush,
-	.add_request		= gen6_add_request,
-	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= blt_ring_get_irq,
-	.irq_put		= blt_ring_put_irq,
-	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
-	.sync_to		= gen6_blt_ring_sync_to,
-	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
-				   MI_SEMAPHORE_SYNC_BV,
-				   MI_SEMAPHORE_SYNC_INVALID},
-	.signal_mbox		= {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 
-	*ring = render_ring;
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->sync_to = gen6_ring_sync;
+		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+		ring->signal_mbox[0] = GEN6_VRSYNC;
+		ring->signal_mbox[1] = GEN6_BRSYNC;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
+		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+	} else {
+		ring->add_request = i9xx_add_request;
+		if (INTEL_INFO(dev)->gen < 4)
+			ring->flush = gen2_render_ring_flush;
+		else
+			ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = ring_get_seqno;
+		if (IS_GEN2(dev)) {
+			ring->irq_get = i8xx_ring_get_irq;
+			ring->irq_put = i8xx_ring_put_irq;
+		} else {
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 6)
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
 
+
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
@@ -1556,22 +1411,47 @@
 	return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
-    uint32_t size)
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
 
-	*ring = render_ring;
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ring->add_request = gen6_add_request;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
-	} else if (IS_GEN5(dev)) {
-		ring->add_request = pc_render_add_request;
-		ring->get_seqno = pc_render_get_seqno;
+		/* non-kms not supported on gen6+ */
+		return -ENODEV;
 	}
 
+	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
+	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+	 * the special gen5 functions. */
+	ring->add_request = i9xx_add_request;
+	if (INTEL_INFO(dev)->gen < 4)
+		ring->flush = gen2_render_ring_flush;
+	else
+		ring->flush = gen4_render_ring_flush;
+	ring->get_seqno = ring_get_seqno;
+	if (IS_GEN2(dev)) {
+		ring->irq_get = i8xx_ring_get_irq;
+		ring->irq_put = i8xx_ring_put_irq;
+	} else {
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+	}
+	ring->irq_enable_mask = I915_USER_INTERRUPT;
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
+
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
@@ -1582,20 +1462,14 @@
 	if (IS_I830(ring->dev))
 		ring->effective_size -= 128;
 
-	ring->map.offset = start;
-	ring->map.size = size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.virtual == NULL) {
+	ring->virtual_start = pmap_mapdev_attr(start, size,
+	    VM_MEMATTR_WRITE_COMBINING);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
-	ring->virtual_start = (void *)ring->map.virtual;
 	return 0;
 }
 
@@ -1604,11 +1478,47 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
 
-	if (IS_GEN6(dev) || IS_GEN7(dev))
-		*ring = gen6_bsd_ring;
-	else
-		*ring = bsd_ring;
+	ring->name = "bsd ring";
+	ring->id = VCS;
 
+	ring->write_tail = ring_write_tail;
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		ring->mmio_base = GEN6_BSD_RING_BASE;
+		/* gen6 bsd needs a special wa for tail updates */
+		if (IS_GEN6(dev))
+			ring->write_tail = gen6_bsd_ring_write_tail;
+		ring->flush = gen6_ring_flush;
+		ring->add_request = gen6_add_request;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		ring->sync_to = gen6_ring_sync;
+		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+		ring->signal_mbox[0] = GEN6_RVSYNC;
+		ring->signal_mbox[1] = GEN6_BVSYNC;
+	} else {
+		ring->mmio_base = BSD_RING_BASE;
+		ring->flush = bsd_ring_flush;
+		ring->add_request = i9xx_add_request;
+		ring->get_seqno = ring_get_seqno;
+		if (IS_GEN5(dev)) {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	}
+	ring->init = init_ring_common;
+
 	return intel_init_ring_buffer(dev, ring);
 }
 
@@ -1617,7 +1527,25 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
 
-	*ring = gen6_blt_ring;
+	ring->name = "blitter ring";
+	ring->id = BCS;
 
+	ring->mmio_base = BLT_RING_BASE;
+	ring->write_tail = ring_write_tail;
+	ring->flush = blt_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+	ring->irq_get = gen6_ring_get_irq;
+	ring->irq_put = gen6_ring_put_irq;
+	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	ring->sync_to = gen6_ring_sync;
+	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->signal_mbox[0] = GEN6_RBSYNC;
+	ring->signal_mbox[1] = GEN6_VBSYNC;
+	ring->init = init_ring_common;
+
 	return intel_init_ring_buffer(dev, ring);
 }

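The ring-init functions above stop copying static template structs
(*ring = render_ring) and instead fill in every callback explicitly,
selecting flush/add_request/irq/dispatch handlers per GPU generation,
newest first.  A minimal standalone sketch of that dispatch pattern,
using hypothetical stand-in types and callbacks rather than the
driver's real intel_ring_buffer:

#include <stdio.h>

/* Hypothetical, trimmed-down stand-in for intel_ring_buffer. */
struct ring {
	const char *name;
	void (*flush)(struct ring *);
	void (*dispatch)(struct ring *);
};

static void gen2_flush(struct ring *r)    { printf("%s: gen2 flush\n", r->name); }
static void gen4_flush(struct ring *r)    { printf("%s: gen4 flush\n", r->name); }
static void i915_dispatch(struct ring *r) { printf("%s: i915 dispatch\n", r->name); }
static void i965_dispatch(struct ring *r) { printf("%s: i965 dispatch\n", r->name); }

/* Newest-generation-first selection, shaped like the if/else ladder
 * in the render ring setup above. */
static void ring_setup(struct ring *r, int gen)
{
	r->name = "render ring";
	r->flush = (gen < 4) ? gen2_flush : gen4_flush;
	r->dispatch = (gen >= 4) ? i965_dispatch : i915_dispatch;
}

int main(void)
{
	struct ring r;

	ring_setup(&r, 5);
	r.flush(&r);
	r.dispatch(&r);
	return 0;
}

Spelling the assignments out per ring keeps each generation's quirks
in one visible branch instead of hiding them in a template struct.
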
Modified: trunk/sys/dev/drm2/i915/intel_ringbuffer.h
===================================================================
--- trunk/sys/dev/drm2/i915/intel_ringbuffer.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_ringbuffer.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*
- * $FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_ringbuffer.h 235783 2012-05-22 11:07:44Z kib $
+ * $FreeBSD: stable/10/sys/dev/drm2/i915/intel_ringbuffer.h 280369 2015-03-23 13:38:33Z kib $
  */
 
 #ifndef _INTEL_RINGBUFFER_H_
@@ -6,7 +7,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct  intel_hw_status_page {
-	uint32_t	*page_addr;
+	u32		*page_addr;
 	unsigned int	gfx_addr;
 	struct		drm_i915_gem_object *obj;
 };
@@ -38,13 +39,13 @@
 		BCS,
 	} id;
 #define I915_NUM_RINGS 3
-	uint32_t	mmio_base;
+	u32		mmio_base;
 	void		*virtual_start;
 	struct		drm_device *dev;
 	struct		drm_i915_gem_object *obj;
 
-	uint32_t	head;
-	uint32_t	tail;
+	u32		head;
+	u32		tail;
 	int		space;
 	int		size;
 	int		effective_size;
@@ -60,13 +61,10 @@
 	 */
 	u32		last_retired_head;
 
-	struct mtx	irq_lock;
-	uint32_t	irq_refcount;
-	uint32_t	irq_mask;
-	uint32_t	irq_seqno;		/* last seq seem at irq time */
-	uint32_t	trace_irq_seqno;
-	uint32_t	waiting_seqno;
-	uint32_t	sync_seqno[I915_NUM_RINGS-1];
+	u32		irq_refcount;
+	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
+	u32		trace_irq_seqno;
+	u32		sync_seqno[I915_NUM_RINGS-1];
 	bool		(*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 
@@ -120,13 +118,26 @@
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	uint32_t outstanding_lazy_request;
+	u32 outstanding_lazy_request;
 
+	/**
+	 * Do an explicit TLB flush before MI_SET_CONTEXT
+	 */
+	bool itlb_before_ctx_switch;
+	struct i915_hw_context *default_context;
+	struct drm_i915_gem_object *last_context_obj;
+
 	drm_local_map_t map;
 
 	void *private;
 };
 
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+	return ring->obj != NULL;
+}
+
 static inline unsigned
 intel_ring_flag(struct intel_ring_buffer *ring)
 {
@@ -156,6 +167,8 @@
 intel_read_status_page(struct intel_ring_buffer *ring, int reg)
 {
 
+	/* Ensure that the compiler doesn't optimize away the load. */
+	__compiler_membar();
 	return (atomic_load_acq_32(ring->status_page.page_addr + reg));
 }
 

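The rewritten intel_read_status_page() pairs __compiler_membar() with
atomic_load_acq_32() so the compiler cannot satisfy the read from a
stale cached value: the status page is written by the GPU, not by the
CPU thread doing the read.  A small sketch of the idea; the barrier
here is assumed to match FreeBSD's __compiler_membar(), essentially an
empty asm statement with a "memory" clobber:

#include <stdint.h>

/* Assumed equivalent of __compiler_membar(): the "memory" clobber
 * forbids the compiler from caching memory contents across it. */
static inline void compiler_membar(void)
{
	__asm__ __volatile__("" ::: "memory");
}

/* Hypothetical status-page read: without the barrier the compiler
 * may hoist the load out of a polling loop and spin on a register
 * copy, never seeing the value the GPU wrote in the meantime. */
static inline uint32_t read_status(const uint32_t *page, int reg)
{
	compiler_membar();
	return page[reg];
}
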
Modified: trunk/sys/dev/drm2/i915/intel_sdvo.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_sdvo.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_sdvo.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright 2006 Dave Airlie <airlied at linux.ie>
  * Copyright © 2006-2007 Intel Corporation
@@ -27,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: release/9.2.0/sys/dev/drm2/i915/intel_sdvo.c 235783 2012-05-22 11:07:44Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_sdvo.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -44,7 +45,7 @@
 #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
 #define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
 #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
 
 #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
 			SDVO_TV_MASK)
@@ -77,7 +78,7 @@
 	device_t ddc_iic_bus, ddc;
 
 	/* Register for the SDVO device: SDVOB or SDVOC */
-	int sdvo_reg;
+	uint32_t sdvo_reg;
 
 	/* Active outputs controlled by this SDVO output */
 	uint16_t controlled_output;
@@ -117,6 +118,9 @@
 	 */
 	bool is_tv;
 
+	/* On different gens SDVOB is at different places. */
+	bool is_sdvob;
+
 	/* This is for current tv format name */
 	int tv_format_index;
 
@@ -266,13 +270,13 @@
 {
 	struct iic_msg msgs[] = {
 		{
-			.slave = intel_sdvo->slave_addr,
+			.slave = intel_sdvo->slave_addr << 1,
 			.flags = 0,
 			.len = 1,
 			.buf = &addr,
 		},
 		{
-			.slave = intel_sdvo->slave_addr,
+			.slave = intel_sdvo->slave_addr << 1,
 			.flags = IIC_M_RD,
 			.len = 1,
 			.buf = ch,
@@ -406,16 +410,14 @@
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define IS_SDVOB(reg)	(reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
 
-static void
-intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
-    const void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+				   const void *args, int args_len)
 {
 	int i;
 
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) == 0)
+	if ((drm_debug & DRM_DEBUGBITS_KMS) == 0)
 		return;
 	DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd);
 	for (i = 0; i < args_len; i++)
@@ -422,13 +424,13 @@
 		printf("%02X ", ((const u8 *)args)[i]);
 	for (; i < 8; i++)
 		printf("   ");
-	for (i = 0; i < DRM_ARRAY_SIZE(sdvo_cmd_names); i++) {
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
 		if (cmd == sdvo_cmd_names[i].cmd) {
 			printf("(%s)", sdvo_cmd_names[i].name);
 			break;
 		}
 	}
-	if (i == DRM_ARRAY_SIZE(sdvo_cmd_names))
+	if (i == ARRAY_SIZE(sdvo_cmd_names))
 		printf("(%02X)", cmd);
 	printf("\n");
 }
@@ -454,7 +456,7 @@
 	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
 
 	for (i = 0; i < args_len; i++) {
-		msgs[i].slave = intel_sdvo->slave_addr;
+		msgs[i].slave = intel_sdvo->slave_addr << 1;
 		msgs[i].flags = 0;
 		msgs[i].len = 2;
 		msgs[i].buf = buf + 2 *i;
@@ -461,7 +463,7 @@
 		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
 		buf[2*i + 1] = ((const u8*)args)[i];
 	}
-	msgs[i].slave = intel_sdvo->slave_addr;
+	msgs[i].slave = intel_sdvo->slave_addr << 1;
 	msgs[i].flags = 0;
 	msgs[i].len = 2;
 	msgs[i].buf = buf + 2*i;
@@ -470,12 +472,12 @@
 
 	/* the following two are to read the response */
 	status = SDVO_I2C_CMD_STATUS;
-	msgs[i+1].slave = intel_sdvo->slave_addr;
+	msgs[i+1].slave = intel_sdvo->slave_addr << 1;
 	msgs[i+1].flags = 0;
 	msgs[i+1].len = 1;
 	msgs[i+1].buf = &status;
 
-	msgs[i+2].slave = intel_sdvo->slave_addr;
+	msgs[i+2].slave = intel_sdvo->slave_addr << 1;
 	msgs[i+2].flags = IIC_M_RD;
 	msgs[i+2].len = 1;
 	msgs[i+2].buf = &status;
@@ -524,7 +526,7 @@
 			goto log_fail;
 	}
 
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) {
 		if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
 			printf("(%s)", cmd_status_names[status]);
 		else
@@ -540,15 +542,15 @@
 					  SDVO_I2C_RETURN_0 + i,
 					  &((u8 *)response)[i]))
 			goto log_fail;
-		if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+		if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
 			printf(" %02X", ((u8 *)response)[i]);
 	}
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
 		printf("\n");
 	return (true);
 
 log_fail:
-	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+	if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
 		printf("... failed\n");
 	return (false);
 }
@@ -744,18 +746,18 @@
 	uint16_t h_sync_offset, v_sync_offset;
 	int mode_clock;
 
-	width = mode->crtc_hdisplay;
-	height = mode->crtc_vdisplay;
+	width = mode->hdisplay;
+	height = mode->vdisplay;
 
-	/* do some mode translations */
-	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
-	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	/* do some mode translations */
+	h_blank_len = mode->htotal - mode->hdisplay;
+	h_sync_len = mode->hsync_end - mode->hsync_start;
 
-	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
-	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	v_blank_len = mode->vtotal - mode->vdisplay;
+	v_sync_len = mode->vsync_end - mode->vsync_start;
 
-	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
-	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+	h_sync_offset = mode->hsync_start - mode->hdisplay;
+	v_sync_offset = mode->vsync_start - mode->vdisplay;
 
 	mode_clock = mode->clock;
 	mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -884,17 +886,24 @@
 	};
 	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
 	uint8_t set_buf_index[2] = { 1, 0 };
-	uint64_t *data = (uint64_t *)&avi_if;
+	uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+	uint64_t *data = (uint64_t *)sdvo_data;
 	unsigned i;
 
 	intel_dip_infoframe_csum(&avi_if);
 
+	/* sdvo spec says that the ecc is handled by the hw, and it looks like
+	 * we must not send the ecc field, either. */
+	memcpy(sdvo_data, &avi_if, 3);
+	sdvo_data[3] = avi_if.checksum;
+	memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
 	if (!intel_sdvo_set_value(intel_sdvo,
 				  SDVO_CMD_SET_HBUF_INDEX,
 				  set_buf_index, 2))
 		return false;
 
-	for (i = 0; i < sizeof(avi_if); i += 8) {
+	for (i = 0; i < sizeof(sdvo_data); i += 8) {
 		if (!intel_sdvo_set_value(intel_sdvo,
 					  SDVO_CMD_SET_HBUF_DATA,
 					  data, 8))
@@ -924,7 +933,7 @@
 
 static bool
 intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
-					struct drm_display_mode *mode)
+					const struct drm_display_mode *mode)
 {
 	struct intel_sdvo_dtd output_dtd;
 
@@ -941,7 +950,7 @@
 
 static bool
 intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
-					struct drm_display_mode *mode,
+					const struct drm_display_mode *mode,
 					struct drm_display_mode *adjusted_mode)
 {
 	/* Reset the input timing to the screen. Assume always input 0. */
@@ -964,7 +973,7 @@
 }
 
 static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@@ -1272,7 +1281,8 @@
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
 	return drm_get_edid(connector,
-			    dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+			    intel_gmbus_get_adapter(dev_priv,
+						    dev_priv->crt_ddc_pin));
 }
 
 static enum drm_connector_status
@@ -1323,7 +1333,6 @@
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	}
 
@@ -1361,8 +1370,7 @@
 		return connector_status_unknown;
 
 	/* add 30ms delay when the output type might be TV */
-	if (intel_sdvo->caps.output_flags &
-	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+	if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
 		drm_msleep(30, "915svo");
 
 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1398,7 +1406,6 @@
 			else
 				ret = connector_status_disconnected;
 
-			connector->display_info.raw_edid = NULL;
 			free(edid, DRM_MEM_KMS);
 		} else
 			ret = connector_status_connected;
@@ -1444,7 +1451,6 @@
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		free(edid, DRM_MEM_KMS);
 	}
 }
@@ -1539,7 +1545,7 @@
 	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
 		return;
 
-	for (i = 0; i < DRM_ARRAY_SIZE(sdvo_tv_modes); i++)
+	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
 		if (reply & (1 << i)) {
 			struct drm_display_mode *nmode;
 			nmode = drm_mode_duplicate(connector->dev,
@@ -1582,9 +1588,6 @@
 			intel_sdvo->sdvo_lvds_fixed_mode =
 				drm_mode_duplicate(connector->dev, newmode);
 
-			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
-					      0);
-
 			intel_sdvo->is_lvds = true;
 			break;
 		}
@@ -1692,7 +1695,7 @@
 	uint8_t cmd;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -1747,7 +1750,7 @@
 	} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
 		temp_value = val;
 		if (intel_sdvo_connector->left == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->right, val);
 			if (intel_sdvo_connector->left_margin == temp_value)
 				return 0;
@@ -1759,7 +1762,7 @@
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (intel_sdvo_connector->right == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->left, val);
 			if (intel_sdvo_connector->right_margin == temp_value)
 				return 0;
@@ -1771,7 +1774,7 @@
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (intel_sdvo_connector->top == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->bottom, val);
 			if (intel_sdvo_connector->top_margin == temp_value)
 				return 0;
@@ -1783,7 +1786,7 @@
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
 			goto set_value;
 		} else if (intel_sdvo_connector->bottom == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->top, val);
 			if (intel_sdvo_connector->bottom_margin == temp_value)
 				return 0;
@@ -1858,7 +1861,7 @@
 		drm_mode_destroy(encoder->dev,
 				 intel_sdvo->sdvo_lvds_fixed_mode);
 
-	device_delete_child(intel_sdvo->base.base.dev->device,
+	device_delete_child(intel_sdvo->base.base.dev->dev,
 	    intel_sdvo->ddc_iic_bus);
 	intel_encoder_destroy(encoder);
 }
@@ -1916,7 +1919,7 @@
 {
 	struct sdvo_device_mapping *mapping;
 
-	if (IS_SDVOB(reg))
+	if (sdvo->is_sdvob)
 		mapping = &(dev_priv->sdvo_mappings[0]);
 	else
 		mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1934,7 +1937,7 @@
 	struct sdvo_device_mapping *mapping;
 	u8 pin;
 
-	if (IS_SDVOB(reg))
+	if (sdvo->is_sdvob)
 		mapping = &dev_priv->sdvo_mappings[0];
 	else
 		mapping = &dev_priv->sdvo_mappings[1];
@@ -1943,12 +1946,12 @@
 	if (mapping->initialized)
 		pin = mapping->i2c_pin;
 
-	if (pin < GMBUS_NUM_PORTS) {
-		sdvo->i2c = dev_priv->gmbus[pin];
+	if (intel_gmbus_is_port_valid(pin)) {
+		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
 		intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
 		intel_gmbus_force_bit(sdvo->i2c, true);
 	} else {
-		sdvo->i2c = dev_priv->gmbus[GMBUS_PORT_DPB];
+		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
 	}
 }
 
@@ -1959,12 +1962,12 @@
 }
 
 static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (IS_SDVOB(sdvo_reg)) {
+	if (sdvo->is_sdvob) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -1989,7 +1992,7 @@
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (IS_SDVOB(sdvo_reg))
+	if (sdvo->is_sdvob)
 		return 0x70;
 	else
 		return 0x72;
@@ -2214,6 +2217,10 @@
 		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
 			return false;
 
+	if (flags & SDVO_OUTPUT_YPRPB0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+			return false;
+
 	if (flags & SDVO_OUTPUT_RGB0)
 		if (!intel_sdvo_analog_init(intel_sdvo, 0))
 			return false;
@@ -2285,7 +2292,7 @@
 				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
 	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
-	drm_connector_attach_property(&intel_sdvo_connector->base.base,
+	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
 				      intel_sdvo_connector->tv_format, 0);
 	return true;
 
@@ -2301,7 +2308,7 @@
 		intel_sdvo_connector->name = \
 			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
 		if (!intel_sdvo_connector->name) return false; \
-		drm_connector_attach_property(connector, \
+		drm_object_attach_property(&connector->base, \
 					      intel_sdvo_connector->name, \
 					      intel_sdvo_connector->cur_##name); \
 		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2338,7 +2345,7 @@
 		if (!intel_sdvo_connector->left)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->left,
 					      intel_sdvo_connector->left_margin);
 
@@ -2347,7 +2354,7 @@
 		if (!intel_sdvo_connector->right)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->right,
 					      intel_sdvo_connector->right_margin);
 		DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2382,7 @@
 		if (!intel_sdvo_connector->top)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->top,
 					      intel_sdvo_connector->top_margin);
 
@@ -2385,7 +2392,7 @@
 		if (!intel_sdvo_connector->bottom)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->bottom,
 					      intel_sdvo_connector->bottom_margin);
 		DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2417,7 +2424,7 @@
 		if (!intel_sdvo_connector->dot_crawl)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->dot_crawl,
 					      intel_sdvo_connector->cur_dot_crawl);
 		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2544,7 +2551,7 @@
 	struct intel_sdvo_ddc_proxy_sc *sc;
 	int ret;
 
-	sdvo->ddc_iic_bus = device_add_child(dev->device,
+	sdvo->ddc_iic_bus = device_add_child(dev->dev,
 	    "intel_sdvo_ddc_proxy", sdvo_reg);
 	if (sdvo->ddc_iic_bus == NULL) {
 		DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo_reg);
@@ -2555,7 +2562,7 @@
 	if (ret != 0) {
 		DRM_ERROR("cannot attach proxy bus %d error %d\n",
 		    sdvo_reg, ret);
-		device_delete_child(dev->device, sdvo->ddc_iic_bus);
+		device_delete_child(dev->dev, sdvo->ddc_iic_bus);
 		return (false);
 	}
 	sc = device_get_softc(sdvo->ddc_iic_bus);
@@ -2583,7 +2590,7 @@
     intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST);
 
 
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
@@ -2594,7 +2601,8 @@
 	    M_WAITOK | M_ZERO);
 
 	intel_sdvo->sdvo_reg = sdvo_reg;
-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	intel_sdvo->is_sdvob = is_sdvob;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
 	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev, sdvo_reg)) {
 		free(intel_sdvo, DRM_MEM_KMS);
@@ -2611,13 +2619,13 @@
 		u8 byte;
 
 		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
-			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+			DRM_DEBUG_KMS("No SDVO device found on %s\n",
+				      SDVO_NAME(intel_sdvo));
 			goto err;
 		}
 	}
 
-	if (IS_SDVOB(sdvo_reg))
+	if (intel_sdvo->is_sdvob)
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 	else
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2636,12 +2644,12 @@
 			     &intel_sdvo->hotplug_active, 2);
 	intel_sdvo->hotplug_active[0] &= ~0x3;
 
-	if (!intel_sdvo_output_setup(intel_sdvo,
-	    intel_sdvo->caps.output_flags)) {
-		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-		goto err;
-	}
+	if (!intel_sdvo_output_setup(intel_sdvo,
+				     intel_sdvo->caps.output_flags)) {
+		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+			      SDVO_NAME(intel_sdvo));
+		goto err;
+	}
 
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 

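Much of the churn above is the slave_addr << 1 shift: FreeBSD's iicbus
carries the R/W bit in bit 0 of iic_msg.slave, i.e. it expects the
8-bit on-the-wire address, while intel_sdvo_init() stores the 7-bit
form (the BIOS-derived value shifted right once).  A standalone sketch
with a simplified local copy of struct iic_msg (the real definition
lives in sys/dev/iicbus/iic.h):

#include <stdint.h>
#include <stdio.h>

/* Simplified local copy of FreeBSD's struct iic_msg. */
struct iic_msg {
	uint16_t slave;		/* 7-bit address << 1; R/W flag in bit 0 */
	uint16_t flags;
#define IIC_M_RD 0x0001
	uint16_t len;
	uint8_t *buf;
};

/* Write-register-then-read pair for a 7-bit device address, shaped
 * like intel_sdvo_read_byte() above. */
static void build_read(struct iic_msg msgs[2], uint8_t addr7,
    uint8_t *reg, uint8_t *val)
{
	msgs[0].slave = (uint16_t)(addr7 << 1);	/* back to wire form */
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = reg;

	msgs[1].slave = (uint16_t)(addr7 << 1);
	msgs[1].flags = IIC_M_RD;
	msgs[1].len = 1;
	msgs[1].buf = val;
}

int main(void)
{
	struct iic_msg msgs[2];
	uint8_t reg = 0x0f, val = 0;

	build_read(msgs, 0x38, &reg, &val);
	printf("wire address: 0x%02x\n", msgs[0].slave);	/* 0x70 */
	return 0;
}

The default SDVOB address shows the round trip: the mapping code
returns 0x70, init stores 0x70 >> 1 = 0x38, and each message sends
0x38 << 1 = 0x70 again.
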
Modified: trunk/sys/dev/drm2/i915/intel_sdvo_regs.h
===================================================================
--- trunk/sys/dev/drm2/i915/intel_sdvo_regs.h	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_sdvo_regs.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2007 Intel Corporation
  *
@@ -23,7 +24,7 @@
  * Authors:
  *	Eric Anholt <eric at anholt.net>
  *
- * $MidnightBSD$
+ * $FreeBSD: stable/10/sys/dev/drm2/i915/intel_sdvo_regs.h 235783 2012-05-22 11:07:44Z kib $
  */
 
 /**

Modified: trunk/sys/dev/drm2/i915/intel_sprite.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_sprite.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_sprite.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2011 Intel Corporation
  *
@@ -31,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_sprite.c 280369 2015-03-23 13:38:33Z kib $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -114,14 +115,18 @@
 	 * when scaling is disabled.
 	 */
 	if (crtc_w != src_w || crtc_h != src_h) {
-		dev_priv->sprite_scaling_enabled = true;
-		sandybridge_update_wm(dev);
-		intel_wait_for_vblank(dev, pipe);
+		if (!dev_priv->sprite_scaling_enabled) {
+			dev_priv->sprite_scaling_enabled = true;
+			intel_update_watermarks(dev);
+			intel_wait_for_vblank(dev, pipe);
+		}
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 	} else {
-		dev_priv->sprite_scaling_enabled = false;
-		/* potentially re-enable LP watermarks */
-		sandybridge_update_wm(dev);
+		if (dev_priv->sprite_scaling_enabled) {
+			dev_priv->sprite_scaling_enabled = false;
+			/* potentially re-enable LP watermarks */
+			intel_update_watermarks(dev);
+		}
 	}
 
 	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -137,7 +142,7 @@
 	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
-	I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -153,8 +158,11 @@
 	/* Can't leave the scaler enabled... */
 	I915_WRITE(SPRSCALE(pipe), 0);
 	/* Activate double buffered register update */
-	I915_WRITE(SPRSURF(pipe), 0);
+	I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
 	POSTING_READ(SPRSURF(pipe));
+
+	dev_priv->sprite_scaling_enabled = false;
+	intel_update_watermarks(dev);
 }
 
 static int
@@ -212,7 +220,7 @@
 }
 
 static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
 		 unsigned int crtc_w, unsigned int crtc_h,
 		 uint32_t x, uint32_t y,
@@ -222,7 +230,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe, pixel_size;
-	u32 dvscntr, dvsscale = 0;
+	u32 dvscntr, dvsscale;
 
 	dvscntr = I915_READ(DVSCNTR(pipe));
 
@@ -266,8 +274,8 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dvscntr |= DVS_TILED;
 
-	/* must disable */
-	dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+	if (IS_GEN6(dev))
+		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 	dvscntr |= DVS_ENABLE;
 
 	/* Sizes are 0 based */
@@ -278,7 +286,8 @@
 
 	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
-	if (crtc_w != src_w || crtc_h != src_h)
+	dvsscale = 0;
+	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
 		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -294,12 +303,12 @@
 	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
-	I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 
 static void
-snb_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -310,7 +319,7 @@
 	/* Disable the scaler */
 	I915_WRITE(DVSSCALE(pipe), 0);
 	/* Flush double buffered register updates */
-	I915_WRITE(DVSSURF(pipe), 0);
+	I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
 	POSTING_READ(DVSSURF(pipe));
 }
 
@@ -337,7 +346,7 @@
 }
 
 static int
-snb_update_colorkey(struct drm_plane *plane,
+ilk_update_colorkey(struct drm_plane *plane,
 		    struct drm_intel_sprite_colorkey *key)
 {
 	struct drm_device *dev = plane->dev;
@@ -366,7 +375,7 @@
 }
 
 static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -554,14 +563,13 @@
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *set = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
 
-	if (!dev_priv)
-		return -EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
 
 	/* Make sure we don't try to enable both src & dest simultaneously */
 	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -588,14 +596,13 @@
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *get = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
 
-	if (!dev_priv)
-		return -EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
 
 	sx_xlock(&dev->mode_config.mutex);
 
@@ -620,6 +627,14 @@
 	.destroy = intel_destroy_plane,
 };
 
+static uint32_t ilk_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
 static uint32_t snb_plane_formats[] = {
 	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
@@ -634,33 +649,55 @@
 {
 	struct intel_plane *intel_plane;
 	unsigned long possible_crtcs;
+	const uint32_t *plane_formats;
+	int num_plane_formats;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
 	intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
 	    M_WAITOK | M_ZERO);
 
-	if (IS_GEN6(dev)) {
+	switch (INTEL_INFO(dev)->gen) {
+	case 5:
+	case 6:
 		intel_plane->max_downscale = 16;
-		intel_plane->update_plane = snb_update_plane;
-		intel_plane->disable_plane = snb_disable_plane;
-		intel_plane->update_colorkey = snb_update_colorkey;
-		intel_plane->get_colorkey = snb_get_colorkey;
-	} else if (IS_GEN7(dev)) {
+		intel_plane->update_plane = ilk_update_plane;
+		intel_plane->disable_plane = ilk_disable_plane;
+		intel_plane->update_colorkey = ilk_update_colorkey;
+		intel_plane->get_colorkey = ilk_get_colorkey;
+
+		if (IS_GEN6(dev)) {
+			plane_formats = snb_plane_formats;
+			num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
+		} else {
+			plane_formats = ilk_plane_formats;
+			num_plane_formats = DRM_ARRAY_SIZE(ilk_plane_formats);
+		}
+		break;
+
+	case 7:
 		intel_plane->max_downscale = 2;
 		intel_plane->update_plane = ivb_update_plane;
 		intel_plane->disable_plane = ivb_disable_plane;
 		intel_plane->update_colorkey = ivb_update_colorkey;
 		intel_plane->get_colorkey = ivb_get_colorkey;
+
+		plane_formats = snb_plane_formats;
+		num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
+		break;
+
+	default:
+		return -ENODEV;
 	}
 
 	intel_plane->pipe = pipe;
 	possible_crtcs = (1 << pipe);
 	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
-			     &intel_plane_funcs, snb_plane_formats,
-			     DRM_ARRAY_SIZE(snb_plane_formats), false);
+			     &intel_plane_funcs,
+			     plane_formats, num_plane_formats,
+			     false);
 	if (ret)
 		free(intel_plane, DRM_MEM_KMS);
 

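The sprite-scaling hunks above gate the watermark recomputation (and
the vblank wait) on an actual state transition: intel_update_watermarks()
now runs only when sprite_scaling_enabled flips, not on every plane
update.  A standalone sketch of that transition-only pattern, with a
made-up dev_state standing in for dev_priv:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool sprite_scaling_enabled;
};

static void update_watermarks(struct dev_state *d)
{
	printf("recomputing watermarks (scaling=%d)\n",
	    (int)d->sprite_scaling_enabled);
}

/* Only touch the expensive path when the state actually changes. */
static void set_scaling(struct dev_state *d, bool scaled)
{
	if (scaled == d->sprite_scaling_enabled)
		return;			/* no transition, nothing to do */
	d->sprite_scaling_enabled = scaled;
	update_watermarks(d);
}

int main(void)
{
	struct dev_state d = { false };

	set_scaling(&d, true);	/* transition: recompute */
	set_scaling(&d, true);	/* no-op */
	set_scaling(&d, false);	/* transition back: recompute */
	return 0;
}
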
Modified: trunk/sys/dev/drm2/i915/intel_tv.c
===================================================================
--- trunk/sys/dev/drm2/i915/intel_tv.c	2018-05-28 00:12:27 UTC (rev 10116)
+++ trunk/sys/dev/drm2/i915/intel_tv.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*
  * Copyright © 2006-2008 Intel Corporation
  *   Jesse Barnes <jesse.barnes at intel.com>
@@ -31,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/intel_tv.c 282199 2015-04-28 19:35:05Z dumbbell $");
 
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/drm.h>
@@ -814,7 +815,7 @@
 {
 	int i;
 
-	for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+	for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++) {
 		const struct tv_mode *tv_mode = &tv_modes[i];
 
 		if (!strcmp(tv_format, tv_mode->name))
@@ -846,7 +847,7 @@
 
 
 static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1155,6 +1156,15 @@
 		   DAC_B_0_7_V |
 		   DAC_C_0_7_V);
 
+	/*
+	 * The TV sense state should be cleared to zero on the Cantiga platform;
+	 * otherwise the TV is misdetected. This is a hardware requirement.
+	 */
+	if (IS_GM45(dev))
+		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
@@ -1224,7 +1234,7 @@
 	}
 
 	intel_tv->tv_format = tv_mode->name;
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 		connector->dev->mode_config.tv_mode_property, i);
 }
 
@@ -1244,9 +1254,7 @@
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, 0);
 
-	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv, connector);
-	} else if (force) {
+	if (force) {
 		struct intel_load_detect_pipe tmp;
 
 		if (intel_get_load_detect_pipe(&intel_tv->base, connector,
@@ -1319,7 +1327,7 @@
 	int j, count = 0;
 	u64 tmp;
 
-	for (j = 0; j < DRM_ARRAY_SIZE(input_res_table);
+	for (j = 0; j < ARRAY_SIZE(input_res_table);
 	     j++) {
 		const struct input_res *input = &input_res_table[j];
 		unsigned int hactive_s = input->w;
@@ -1386,7 +1394,7 @@
 	int ret = 0;
 	bool changed = false;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret < 0)
 		goto out;
 
@@ -1407,7 +1415,7 @@
 		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_mode_property) {
-		if (val >= DRM_ARRAY_SIZE(tv_modes)) {
+		if (val >= ARRAY_SIZE(tv_modes)) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1499,7 +1507,7 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
-	char *tv_format_names[DRM_ARRAY_SIZE(tv_modes)];
+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
 	int i, initial_mode = 0;
 
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1583,24 +1591,24 @@
 	connector->doublescan_allowed = false;
 
 	/* Create TV properties then attach current values */
-	for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++)
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
 		tv_format_names[i] = __DECONST(char *, tv_modes[i].name);
 	drm_mode_create_tv_properties(dev,
-				      DRM_ARRAY_SIZE(tv_modes),
+				      ARRAY_SIZE(tv_modes),
 				      tv_format_names);
 
-	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+	drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
 				   initial_mode);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_left_margin_property,
 				   intel_tv->margin[TV_MARGIN_LEFT]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_top_margin_property,
 				   intel_tv->margin[TV_MARGIN_TOP]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_right_margin_property,
 				   intel_tv->margin[TV_MARGIN_RIGHT]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_bottom_margin_property,
 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
 #if 0

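Several hunks above replace open-coded sizeof divisions and
DRM_ARRAY_SIZE() with ARRAY_SIZE(); all three forms compute the
element count of a true array (never of a pointer).  A tiny
self-contained example of the usual definition, with an illustrative
table name:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const char *tv_mode_names[] = { "NTSC-M", "PAL", "SECAM" };

int main(void)
{
	/* Same loop bound the diff above derives from tv_modes. */
	for (unsigned i = 0; i < ARRAY_SIZE(tv_mode_names); i++)
		printf("%u: %s\n", i, tv_mode_names[i]);
	return 0;
}
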
Added: trunk/sys/dev/drm2/radeon/ObjectID.h
===================================================================
--- trunk/sys/dev/drm2/radeon/ObjectID.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/ObjectID.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,700 @@
+/* $MidnightBSD$ */
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.  
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/ObjectID.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition                  */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE                    0x0
+#define GRAPH_OBJECT_TYPE_GPU                     0x1
+#define GRAPH_OBJECT_TYPE_ENCODER                 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
+#define GRAPH_OBJECT_TYPE_ROUTER                  0x4
+/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6  
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
+
+/****************************************************/
+/* Encoder Object ID Definition                     */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE                    0x00 
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B                  0x08
+#define ENCODER_OBJECT_ID_CH7303                  0x09
+#define ENCODER_OBJECT_ID_CH7301                  0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+#define ENCODER_OBJECT_ID_TITFP513                0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623                  0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0X17  /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+#define ENCODER_OBJECT_ID_VT1625                  0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801               0x1C
+#define ENCODER_OBJECT_ID_DP_DP501                0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+#define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+
+/****************************************************/
+/* Connector Object ID Definition                   */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE                  0x00 
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D       0x04
+#define CONNECTOR_OBJECT_ID_VGA                   0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE             0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+#define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART                 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+#define CONNECTOR_OBJECT_ID_LVDS                  0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN              0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR        0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition                      */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE											0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL				0x01
+
+/****************************************************/
+/* Generic Object ID Definition                     */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04        // This object can show up in the Misc Object table; it follows the ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition               */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1                     0x01
+#define GRAPH_OBJECT_ENUM_ID2                     0x02
+#define GRAPH_OBJECT_ENUM_ID3                     0x03
+#define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
+
+/****************************************************/
+/* Graphics Object ID Bit definition                */
+/****************************************************/
+#define OBJECT_ID_MASK                            0x00FF
+#define ENUM_ID_MASK                              0x0700
+#define RESERVED1_ID_MASK                         0x0800
+#define OBJECT_TYPE_MASK                          0x7000
+#define RESERVED2_ID_MASK                         0x8000
+                                                  
+#define OBJECT_ID_SHIFT                           0x00
+#define ENUM_ID_SHIFT                             0x08
+#define OBJECT_TYPE_SHIFT                         0x0C
+
+
+/****************************************************/
+/* Graphics Object family definition                */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
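
/*
 * Worked example (standalone, not part of this header): every
 * composite ID below packs three fields with the masks and shifts
 * defined above: bits 14:12 object type, bits 10:8 enum ID, bits 7:0
 * object ID.  Composing encoder type (0x2), ENUM_ID2 (0x2) and
 * INTERNAL_UNIPHY (0x1E) yields 0x221E, and the masks recover the
 * fields again:
 */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t id = (2 << 0x0C) |	/* GRAPH_OBJECT_TYPE_ENCODER */
		      (2 << 0x08) |	/* GRAPH_OBJECT_ENUM_ID2 */
		      0x1E;		/* ENCODER_OBJECT_ID_INTERNAL_UNIPHY */

	printf("id=0x%04x type=%u enum=%u object=0x%02x\n", (unsigned)id,
	    (unsigned)((id & 0x7000) >> 0x0C),	/* OBJECT_TYPE_MASK/SHIFT */
	    (unsigned)((id & 0x0700) >> 0x08),	/* ENUM_ID_MASK/SHIFT */
	    (unsigned)(id & 0x00FF));		/* OBJECT_ID_MASK */
	return 0;
}
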
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS      */
+/****************************************************/
+#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS  */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+#define ENCODER_SIL170B_ENUM_ID1              0x2108  
+#define ENCODER_CH7303_ENUM_ID1               0x2109
+#define ENCODER_CH7301_ENUM_ID1               0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1       0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1       0x210D
+#define ENCODER_TITFP513_ENUM_ID1             0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1       0x210F
+#define ENCODER_VT1623_ENUM_ID1               0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1          0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1        0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
+#define ENCODER_SI178_ENUM_ID1                   0x2117 
+#define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+#define ENCODER_VT1625_ENUM_ID1                  0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1             0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1       0x211C
+#define ENCODER_DP_DP501_ENUM_ID1                0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
+
+#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
+#define ENCODER_VCE_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1        0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1          0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1        0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1          0x3104
+#define CONNECTOR_VGA_ENUM_ID1                      0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1                0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1                   0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1                    0x3108
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1              0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1                 0x310A
+#define CONNECTOR_SCART_ENUM_ID1                    0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1              0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1              0x310D
+#define CONNECTOR_LVDS_ENUM_ID1                     0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS   */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS  */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS         */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif  /* GRAPHICTYPE */
+
+
+
+


Property changes on: trunk/sys/dev/drm2/radeon/ObjectID.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
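
Each ENUM_ID macro above composes a 16-bit object ID from three bit-fields.
A minimal decode sketch follows, assuming the shift/mask constants defined
earlier in ObjectID.h (object type in bits 14:12, enum ID in bits 10:8,
object ID in bits 7:0); the helper is hypothetical, for illustration only,
and not part of this commit:

    #include <stdio.h>

    /* Assumed values, from the definitions earlier in ObjectID.h. */
    #define OBJECT_ID_MASK     0x00FF
    #define ENUM_ID_MASK       0x0700
    #define OBJECT_TYPE_MASK   0x7000
    #define OBJECT_ID_SHIFT    0x00
    #define ENUM_ID_SHIFT      0x08
    #define OBJECT_TYPE_SHIFT  0x0C

    int main(void)
    {
        /* Legacy CONNECTOR_VGA_ENUM_ID1 value from the comment block above. */
        unsigned id = 0x3105;

        printf("type %u, enum %u, object 0x%02X\n",
            (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT, /* 3 = connector */
            (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT,         /* 1 */
            (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT);    /* 0x05 = VGA */
        return 0;
    }
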
Added: trunk/sys/dev/drm2/radeon/README
===================================================================
--- trunk/sys/dev/drm2/radeon/README	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/README	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,6 @@
+# $FreeBSD: stable/10/sys/dev/drm2/radeon/README 254885 2013-08-25 19:37:15Z dumbbell $
+
+== Updates to reg_srcs/ files ==
+
+When a file in the "reg_srcs" subdirectory is updated, be sure to
+regenerate the headers by running "make" in tools/tools/drm/radeon/mkregtable.


Property changes on: trunk/sys/dev/drm2/radeon/README
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atom-bits.h
===================================================================
--- trunk/sys/dev/drm2/radeon/atom-bits.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atom-bits.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,52 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atom-bits.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+    return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+    return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr+1)) << 8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+    return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr+2)) << 16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/atom-bits.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
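
The get_u8/get_u16/get_u32 accessors above assemble multi-byte values from
the BIOS image one byte at a time, least-significant byte first, so they
behave the same on any host byte order and tolerate unaligned offsets.  A
standalone sketch (hypothetical test harness, not part of this commit):

    #include <assert.h>
    #include <stdint.h>

    /* Same definitions as in atom-bits.h above. */
    static inline uint8_t get_u8(void *bios, int ptr)
    {
        return ((unsigned char *)bios)[ptr];
    }
    static inline uint16_t get_u16(void *bios, int ptr)
    {
        return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
    }
    static inline uint32_t get_u32(void *bios, int ptr)
    {
        return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
    }

    int main(void)
    {
        unsigned char bios[] = { 0x78, 0x56, 0x34, 0x12 };

        assert(get_u16(bios, 0) == 0x5678);     /* low word, LSB first */
        assert(get_u32(bios, 0) == 0x12345678); /* full little-endian dword */
        return 0;
    }
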
Added: trunk/sys/dev/drm2/radeon/atom-names.h
===================================================================
--- trunk/sys/dev/drm2/radeon/atom-names.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atom-names.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,104 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atom-names.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/atom-names.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
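
These name tables exist only when ATOM_DEBUG is defined (otherwise the
*_CNT macros collapse to zero), so any lookup must be bounds-checked
against the count first; atom.c below does exactly that in
atom_op_calltable() and atom_op_setport().  A minimal sketch of the
pattern, assuming ATOM_DEBUG is in effect:

    #ifdef ATOM_DEBUG
    /* Hypothetical helper mirroring the interpreter's bounds check. */
    static const char *atom_op_name(int op)
    {
        return (op >= 0 && op < ATOM_OP_NAMES_CNT) ?
            atom_op_names[op] : "UNKNOWN";
    }
    #endif
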
Added: trunk/sys/dev/drm2/radeon/atom-types.h
===================================================================
--- trunk/sys/dev/drm2/radeon/atom-types.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atom-types.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,46 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atom-types.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/atom-types.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
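
ATOM_BIG_ENDIAN is fixed at compile time from the toolchain's __BIG_ENDIAN
definition.  A tiny standalone probe (hypothetical, for illustration only)
shows the runtime property the macro encodes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t probe = 1;

        /* On a little-endian host the low-order byte of 1 is stored first. */
        if (*(const unsigned char *)&probe == 0)
            printf("host is big-endian (ATOM_BIG_ENDIAN would be 1)\n");
        else
            printf("host is little-endian (ATOM_BIG_ENDIAN would be 0)\n");
        return 0;
    }
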
Added: trunk/sys/dev/drm2/radeon/atom.c
===================================================================
--- trunk/sys/dev/drm2/radeon/atom.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atom.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1404 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atom.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+#include "radeon.h"
+
+#define ATOM_COND_ABOVE		0
+#define ATOM_COND_ABOVEOREQUAL	1
+#define ATOM_COND_ALWAYS	2
+#define ATOM_COND_BELOW		3
+#define ATOM_COND_BELOWOREQUAL	4
+#define ATOM_COND_EQUAL		5
+#define ATOM_COND_NOTEQUAL	6
+
+#define ATOM_PORT_ATI	0
+#define ATOM_PORT_PCI	1
+#define ATOM_PORT_SYSIO	2
+
+#define ATOM_UNIT_MICROSEC	0
+#define ATOM_UNIT_MILLISEC	1
+
+#define PLL_INDEX	2
+#define PLL_DATA	3
+
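+/*
+ * Per-call interpreter state: ps and ws back the ATOM_ARG_PS/ATOM_ARG_WS
+ * operands, start is the base offset that jump targets are relative to,
+ * and last_jump/last_jump_jiffies drive the stuck-loop watchdog in
+ * atom_op_jump(); abort is set when a nested table call fails.
+ */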
+typedef struct {
+	struct atom_context *ctx;
+	uint32_t *ps, *ws;
+	int ps_shift;
+	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
+} atom_exec_context;
+
+int atom_debug = 0;
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+
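+/*
+ * Operand alignment codes 0-7 select which part of a 32-bit value an
+ * operand covers (the whole dword, one of three word positions, or one
+ * of four bytes); atom_arg_mask isolates those bits and atom_arg_shift
+ * right-justifies them, matching the [31:0]/[23:8]/... debug traces.
+ */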
+static uint32_t atom_arg_mask[8] = {
+	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+	/* translate destination alignment field to the source alignment encoding */
+	{0, 0, 0, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+	while (n--)
+		printf("   ");
+}
+
+#define ATOM_DEBUG_PRINT(...) do if (atom_debug) { printf(__FILE__ __VA_ARGS__); } while (0)
+#define ATOM_SDEBUG_PRINT(...) do if (atom_debug) { printf(__FILE__); debug_print_spaces(debug_depth); printf(__VA_ARGS__); } while (0)
+#else
+#define ATOM_DEBUG_PRINT(...) do { } while (0)
+#define ATOM_SDEBUG_PRINT(...) do { } while (0)
+#endif
+
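+/*
+ * Execute one indirect-IO (IIO) micro-program from the BIOS: a byte-coded
+ * sequence of read/write/bit-move steps, terminated by ATOM_IIO_END, that
+ * translates an indirect register index into real ioreg accesses.
+ */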
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+				 uint32_t index, uint32_t data)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	uint32_t temp = 0xCDCDCDCD;
+
+	while (1)
+		switch (CU8(base)) {
+		case ATOM_IIO_NOP:
+			base++;
+			break;
+		case ATOM_IIO_READ:
+			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			base += 3;
+			break;
+		case ATOM_IIO_WRITE:
+			if (rdev->family == CHIP_RV515)
+				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+			base += 3;
+			break;
+		case ATOM_IIO_CLEAR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+			    CU8(base + 2));
+			base += 3;
+			break;
+		case ATOM_IIO_SET:
+			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+			    CU8(base + 2);
+			base += 3;
+			break;
+		case ATOM_IIO_MOVE_INDEX:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+			    CU8(base + 3));
+			temp |= ((index >> CU8(base + 2)) &
+			    (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+			    CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_DATA:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+			    CU8(base + 3));
+			temp |= ((data >> CU8(base + 2)) &
+			    (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+			    CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_ATTR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+			    CU8(base + 3));
+			temp |= ((ctx->io_attr >> CU8(base + 2)) &
+			    (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+			    CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_END:
+			return temp;
+		default:
+			DRM_INFO("Unknown IIO opcode.\n");
+			return 0;
+		}
+}
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+				 int *ptr, uint32_t *saved, int print)
+{
+	uint32_t idx, val = 0xCDCDCDCD, align, arg;
+	struct atom_context *gctx = ctx->ctx;
+	arg = attr & 7;
+	align = (attr >> 3) & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print)
+			ATOM_DEBUG_PRINT("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			val = gctx->card->reg_read(gctx->card, idx);
+			break;
+		case ATOM_IO_PCI:
+			DRM_INFO("PCI registers are not implemented.\n");
+			return 0;
+		case ATOM_IO_SYSIO:
+			DRM_INFO("SYSIO registers are not implemented.\n");
+			return 0;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				DRM_INFO("Bad IO mode.\n");
+				return 0;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				DRM_INFO("Undefined indirect IO read method %d.\n",
+				    gctx->io_mode & 0x7F);
+				return 0;
+			}
+			val = atom_iio_execute(gctx,
+			    gctx->iio[gctx->io_mode & 0x7F], idx, 0);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+		if (print)
+			ATOM_DEBUG_PRINT("PS[0x%02X,0x%04X]", idx, val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			ATOM_DEBUG_PRINT("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			val = gctx->divmul[0];
+			break;
+		case ATOM_WS_REMAINDER:
+			val = gctx->divmul[1];
+			break;
+		case ATOM_WS_DATAPTR:
+			val = gctx->data_block;
+			break;
+		case ATOM_WS_SHIFT:
+			val = gctx->shift;
+			break;
+		case ATOM_WS_OR_MASK:
+			val = 1 << gctx->shift;
+			break;
+		case ATOM_WS_AND_MASK:
+			val = ~(1 << gctx->shift);
+			break;
+		case ATOM_WS_FB_WINDOW:
+			val = gctx->fb_base;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			val = gctx->io_attr;
+			break;
+		case ATOM_WS_REGPTR:
+			val = gctx->reg_block;
+			break;
+		default:
+			val = ctx->ws[idx];
+		}
+		break;
+	case ATOM_ARG_ID:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print) {
+			if (gctx->data_block)
+				ATOM_DEBUG_PRINT("ID[0x%04X+%04X]", idx, gctx->data_block);
+			else
+				ATOM_DEBUG_PRINT("ID[0x%04X]", idx);
+		}
+		val = U32(idx + gctx->data_block);
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+			val = 0;
+		} else
+			val = gctx->scratch[(gctx->fb_base / 4) + idx];
+		if (print)
+			ATOM_DEBUG_PRINT("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			val = U32(*ptr);
+			(*ptr) += 4;
+			if (print)
+				ATOM_DEBUG_PRINT("IMM 0x%08X\n", val);
+			return val;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			val = U16(*ptr);
+			(*ptr) += 2;
+			if (print)
+				ATOM_DEBUG_PRINT("IMM 0x%04X\n", val);
+			return val;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			val = U8(*ptr);
+			(*ptr)++;
+			if (print)
+				ATOM_DEBUG_PRINT("IMM 0x%02X\n", val);
+			return val;
+		}
+		return 0;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			ATOM_DEBUG_PRINT("PLL[0x%02X]", idx);
+		val = gctx->card->pll_read(gctx->card, idx);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			ATOM_DEBUG_PRINT("MC[0x%02X]", idx);
+		val = gctx->card->mc_read(gctx->card, idx);
+		break;
+	}
+	if (saved)
+		*saved = val;
+	val &= atom_arg_mask[align];
+	val >>= atom_arg_shift[align];
+	if (print)
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			ATOM_DEBUG_PRINT(".[31:0] -> 0x%08X\n", val);
+			break;
+		case ATOM_SRC_WORD0:
+			ATOM_DEBUG_PRINT(".[15:0] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD8:
+			ATOM_DEBUG_PRINT(".[23:8] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD16:
+			ATOM_DEBUG_PRINT(".[31:16] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_BYTE0:
+			ATOM_DEBUG_PRINT(".[7:0] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE8:
+			ATOM_DEBUG_PRINT(".[15:8] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE16:
+			ATOM_DEBUG_PRINT(".[23:16] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE24:
+			ATOM_DEBUG_PRINT(".[31:24] -> 0x%02X\n", val);
+			break;
+		}
+	return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+	case ATOM_ARG_ID:
+		(*ptr) += 2;
+		break;
+	case ATOM_ARG_PLL:
+	case ATOM_ARG_MC:
+	case ATOM_ARG_PS:
+	case ATOM_ARG_WS:
+	case ATOM_ARG_FB:
+		(*ptr)++;
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			(*ptr) += 4;
+			return;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			(*ptr) += 2;
+			return;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			(*ptr)++;
+			return;
+		}
+		return;
+	}
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+	uint32_t val = 0xCDCDCDCD;
+
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		val = U32(*ptr);
+		(*ptr) += 4;
+		break;
+	case ATOM_SRC_WORD0:
+	case ATOM_SRC_WORD8:
+	case ATOM_SRC_WORD16:
+		val = U16(*ptr);
+		(*ptr) += 2;
+		break;
+	case ATOM_SRC_BYTE0:
+	case ATOM_SRC_BYTE8:
+	case ATOM_SRC_BYTE16:
+	case ATOM_SRC_BYTE24:
+		val = U8(*ptr);
+		(*ptr)++;
+		break;
+	}
+	return val;
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			     int *ptr, uint32_t *saved, int print)
+{
+	return atom_get_src_int(ctx,
+	    arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+	    ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+	atom_skip_src_int(ctx,
+	    arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+	    ptr);
+}
+
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			 int *ptr, uint32_t val, uint32_t saved)
+{
+	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	uint32_t old_val = val, idx;
+	struct atom_context *gctx = ctx->ctx;
+	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+	val <<= atom_arg_shift[align];
+	val &= atom_arg_mask[align];
+	saved &= ~atom_arg_mask[align];
+	val |= saved;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		ATOM_DEBUG_PRINT("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			if (idx == 0)
+				gctx->card->reg_write(gctx->card, idx,
+						      val << 2);
+			else
+				gctx->card->reg_write(gctx->card, idx, val);
+			break;
+		case ATOM_IO_PCI:
+			DRM_INFO("PCI registers are not implemented.\n");
+			return;
+		case ATOM_IO_SYSIO:
+			DRM_INFO("SYSIO registers are not implemented.\n");
+			return;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				DRM_INFO("Bad IO mode.\n");
+				return;
+			}
+			if (!gctx->iio[gctx->io_mode & 0xFF]) {
+				DRM_INFO("Undefined indirect IO write method %d.\n",
+				    gctx->io_mode & 0x7F);
+				return;
+			}
+			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
+			    idx, val);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		ATOM_DEBUG_PRINT("PS[0x%02X]", idx);
+		ctx->ps[idx] = cpu_to_le32(val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		ATOM_DEBUG_PRINT("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			gctx->divmul[0] = val;
+			break;
+		case ATOM_WS_REMAINDER:
+			gctx->divmul[1] = val;
+			break;
+		case ATOM_WS_DATAPTR:
+			gctx->data_block = val;
+			break;
+		case ATOM_WS_SHIFT:
+			gctx->shift = val;
+			break;
+		case ATOM_WS_OR_MASK:
+		case ATOM_WS_AND_MASK:
+			break;
+		case ATOM_WS_FB_WINDOW:
+			gctx->fb_base = val;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			gctx->io_attr = val;
+			break;
+		case ATOM_WS_REGPTR:
+			gctx->reg_block = val;
+			break;
+		default:
+			ctx->ws[idx] = val;
+		}
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+		} else
+			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+		ATOM_DEBUG_PRINT("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		ATOM_DEBUG_PRINT("PLL[0x%02X]", idx);
+		gctx->card->pll_write(gctx->card, idx, val);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		ATOM_DEBUG_PRINT("MC[0x%02X]", idx);
+		gctx->card->mc_write(gctx->card, idx, val);
+		return;
+	}
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		ATOM_DEBUG_PRINT(".[31:0] <- 0x%08X\n", old_val);
+		break;
+	case ATOM_SRC_WORD0:
+		ATOM_DEBUG_PRINT(".[15:0] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD8:
+		ATOM_DEBUG_PRINT(".[23:8] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD16:
+		ATOM_DEBUG_PRINT(".[31:16] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE0:
+		ATOM_DEBUG_PRINT(".[7:0] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE8:
+		ATOM_DEBUG_PRINT(".[15:8] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE16:
+		ATOM_DEBUG_PRINT(".[23:16] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE24:
+		ATOM_DEBUG_PRINT(".[31:24] <- 0x%02X\n", old_val);
+		break;
+	}
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst += src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+	DRM_INFO("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8((*ptr)++);
+	int r = 0;
+
+	if (idx < ATOM_TABLE_NAMES_CNT)
+		ATOM_SDEBUG_PRINT("   table: %d (%s)\n", idx, atom_table_names[idx]);
+	else
+		ATOM_SDEBUG_PRINT("   table: %d\n", idx);
+	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t saved;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	ATOM_SDEBUG_PRINT("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	ATOM_SDEBUG_PRINT("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = (dst == src);
+	ctx->ctx->cs_above = (dst > src);
+	ATOM_SDEBUG_PRINT("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+	       ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+	unsigned count = U8((*ptr)++);
+	ATOM_SDEBUG_PRINT("   count: %d\n", count);
+	if (arg == ATOM_UNIT_MICROSEC)
+		udelay(count);
+	else if (!drm_can_sleep())
+		mdelay(count);
+	else
+		DRM_MSLEEP(count);
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	ATOM_SDEBUG_PRINT("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	ATOM_SDEBUG_PRINT("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		ctx->ctx->divmul[0] = dst / src;
+		ctx->ctx->divmul[1] = dst % src;
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* functionally, a nop */
+}
+
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
+	(*ptr) += 2;
+	switch (arg) {
+	case ATOM_COND_ABOVE:
+		execute = ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_ABOVEOREQUAL:
+		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_ALWAYS:
+		execute = 1;
+		break;
+	case ATOM_COND_BELOW:
+		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+		break;
+	case ATOM_COND_BELOWOREQUAL:
+		execute = !ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_EQUAL:
+		execute = ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_NOTEQUAL:
+		execute = !ctx->ctx->cs_equal;
+		break;
+	}
+	if (arg != ATOM_COND_ALWAYS)
+		ATOM_SDEBUG_PRINT("   taken: %s\n", execute ? "yes" : "no");
+	ATOM_SDEBUG_PRINT("   target: 0x%04X\n", target);
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if ((jiffies_to_msecs(cjiffies) > 5000)) {
+					DRM_ERROR("atombios stuck in loop for more than 5 seconds, aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrapped around; just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
+		*ptr = ctx->start + target;
+	}
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, mask, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	ATOM_SDEBUG_PRINT("   mask: 0x%08x", mask);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, saved;
+	int dptr = *ptr;
+	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	else {
+		atom_skip_dst(ctx, arg, attr, ptr);
+		saved = 0xCDCDCDCD;
+	}
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	ATOM_SDEBUG_PRINT("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	ATOM_SDEBUG_PRINT("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst |= src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t val = U8((*ptr)++);
+	ATOM_SDEBUG_PRINT("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+	DRM_INFO("unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	DRM_INFO("unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	DRM_INFO("unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8(*ptr);
+	(*ptr)++;
+	ATOM_SDEBUG_PRINT("   block: %d\n", idx);
+	if (!idx)
+		ctx->ctx->data_block = 0;
+	else if (idx == 255)
+		ctx->ctx->data_block = ctx->start;
+	else
+		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+	ATOM_SDEBUG_PRINT("   base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	ATOM_SDEBUG_PRINT("   fb_base: ");
+	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int port;
+	switch (arg) {
+	case ATOM_PORT_ATI:
+		port = U16(*ptr);
+		if (port < ATOM_IO_NAMES_CNT)
+			ATOM_SDEBUG_PRINT("   port: %d (%s)\n", port, atom_io_names[port]);
+		else
+			ATOM_SDEBUG_PRINT("   port: %d\n", port);
+		if (!port)
+			ctx->ctx->io_mode = ATOM_IO_MM;
+		else
+			ctx->ctx->io_mode = ATOM_IO_IIO | port;
+		(*ptr) += 2;
+		break;
+	case ATOM_PORT_PCI:
+		ctx->ctx->io_mode = ATOM_IO_PCI;
+		(*ptr)++;
+		break;
+	case ATOM_PORT_SYSIO:
+		ctx->ctx->io_mode = ATOM_IO_SYSIO;
+		(*ptr)++;
+		break;
+	}
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	ctx->ctx->reg_block = U16(*ptr);
+	(*ptr) += 2;
+	ATOM_SDEBUG_PRINT("   base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	ATOM_SDEBUG_PRINT("   shift: %d\n", shift);
+	dst <<= shift;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	ATOM_SDEBUG_PRINT("   shift: %d\n", shift);
+	dst >>= shift;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	ATOM_SDEBUG_PRINT("   shift: %d\n", shift);
+	dst <<= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	ATOM_SDEBUG_PRINT("   shift: %d\n", shift);
+	dst >>= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst -= src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, val, target;
+	ATOM_SDEBUG_PRINT("   switch: ");
+	src = atom_get_src(ctx, attr, ptr);
+	while (U16(*ptr) != ATOM_CASE_END)
+		if (U8(*ptr) == ATOM_CASE_MAGIC) {
+			(*ptr)++;
+			ATOM_SDEBUG_PRINT("   case: ");
+			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
+			target = U16(*ptr);
+			if (val == src) {
+				ATOM_SDEBUG_PRINT("   target: %04X\n", target);
+				*ptr = ctx->start + target;
+				return;
+			}
+			(*ptr) += 2;
+		} else {
+			DRM_INFO("Bad case.\n");
+			return;
+		}
+	(*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	ATOM_SDEBUG_PRINT("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	ATOM_SDEBUG_PRINT("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = ((dst & src) == 0);
+	ATOM_SDEBUG_PRINT("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	ATOM_SDEBUG_PRINT("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst ^= src;
+	ATOM_SDEBUG_PRINT("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+	DRM_INFO("unimplemented!\n");
+}
+
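+/*
+ * Opcode dispatch table, indexed by AtomBIOS opcode.  Each entry pairs a
+ * handler with its fixed operand (argument class, port, condition or delay
+ * unit).  Opcode 0 is invalid; ATOM_OP_EOT ends table execution.
+ */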
+static struct {
+	void (*func) (atom_exec_context *, int *, int);
+	int arg;
+} opcode_table[ATOM_OP_CNT] = {
+	{ NULL, 0 },
+	{ atom_op_move, ATOM_ARG_REG },
+	{ atom_op_move, ATOM_ARG_PS },
+	{ atom_op_move, ATOM_ARG_WS },
+	{ atom_op_move, ATOM_ARG_FB },
+	{ atom_op_move, ATOM_ARG_PLL },
+	{ atom_op_move, ATOM_ARG_MC },
+	{ atom_op_and, ATOM_ARG_REG },
+	{ atom_op_and, ATOM_ARG_PS },
+	{ atom_op_and, ATOM_ARG_WS },
+	{ atom_op_and, ATOM_ARG_FB },
+	{ atom_op_and, ATOM_ARG_PLL },
+	{ atom_op_and, ATOM_ARG_MC },
+	{ atom_op_or, ATOM_ARG_REG },
+	{ atom_op_or, ATOM_ARG_PS },
+	{ atom_op_or, ATOM_ARG_WS },
+	{ atom_op_or, ATOM_ARG_FB },
+	{ atom_op_or, ATOM_ARG_PLL },
+	{ atom_op_or, ATOM_ARG_MC },
+	{ atom_op_shift_left, ATOM_ARG_REG },
+	{ atom_op_shift_left, ATOM_ARG_PS },
+	{ atom_op_shift_left, ATOM_ARG_WS },
+	{ atom_op_shift_left, ATOM_ARG_FB },
+	{ atom_op_shift_left, ATOM_ARG_PLL },
+	{ atom_op_shift_left, ATOM_ARG_MC },
+	{ atom_op_shift_right, ATOM_ARG_REG },
+	{ atom_op_shift_right, ATOM_ARG_PS },
+	{ atom_op_shift_right, ATOM_ARG_WS },
+	{ atom_op_shift_right, ATOM_ARG_FB },
+	{ atom_op_shift_right, ATOM_ARG_PLL },
+	{ atom_op_shift_right, ATOM_ARG_MC },
+	{ atom_op_mul, ATOM_ARG_REG },
+	{ atom_op_mul, ATOM_ARG_PS },
+	{ atom_op_mul, ATOM_ARG_WS },
+	{ atom_op_mul, ATOM_ARG_FB },
+	{ atom_op_mul, ATOM_ARG_PLL },
+	{ atom_op_mul, ATOM_ARG_MC },
+	{ atom_op_div, ATOM_ARG_REG },
+	{ atom_op_div, ATOM_ARG_PS },
+	{ atom_op_div, ATOM_ARG_WS },
+	{ atom_op_div, ATOM_ARG_FB },
+	{ atom_op_div, ATOM_ARG_PLL },
+	{ atom_op_div, ATOM_ARG_MC },
+	{ atom_op_add, ATOM_ARG_REG },
+	{ atom_op_add, ATOM_ARG_PS },
+	{ atom_op_add, ATOM_ARG_WS },
+	{ atom_op_add, ATOM_ARG_FB },
+	{ atom_op_add, ATOM_ARG_PLL },
+	{ atom_op_add, ATOM_ARG_MC },
+	{ atom_op_sub, ATOM_ARG_REG },
+	{ atom_op_sub, ATOM_ARG_PS },
+	{ atom_op_sub, ATOM_ARG_WS },
+	{ atom_op_sub, ATOM_ARG_FB },
+	{ atom_op_sub, ATOM_ARG_PLL },
+	{ atom_op_sub, ATOM_ARG_MC },
+	{ atom_op_setport, ATOM_PORT_ATI },
+	{ atom_op_setport, ATOM_PORT_PCI },
+	{ atom_op_setport, ATOM_PORT_SYSIO },
+	{ atom_op_setregblock, 0 },
+	{ atom_op_setfbbase, 0 },
+	{ atom_op_compare, ATOM_ARG_REG },
+	{ atom_op_compare, ATOM_ARG_PS },
+	{ atom_op_compare, ATOM_ARG_WS },
+	{ atom_op_compare, ATOM_ARG_FB },
+	{ atom_op_compare, ATOM_ARG_PLL },
+	{ atom_op_compare, ATOM_ARG_MC },
+	{ atom_op_switch, 0 },
+	{ atom_op_jump, ATOM_COND_ALWAYS },
+	{ atom_op_jump, ATOM_COND_EQUAL },
+	{ atom_op_jump, ATOM_COND_BELOW },
+	{ atom_op_jump, ATOM_COND_ABOVE },
+	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
+	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
+	{ atom_op_jump, ATOM_COND_NOTEQUAL },
+	{ atom_op_test, ATOM_ARG_REG },
+	{ atom_op_test, ATOM_ARG_PS },
+	{ atom_op_test, ATOM_ARG_WS },
+	{ atom_op_test, ATOM_ARG_FB },
+	{ atom_op_test, ATOM_ARG_PLL },
+	{ atom_op_test, ATOM_ARG_MC },
+	{ atom_op_delay, ATOM_UNIT_MILLISEC },
+	{ atom_op_delay, ATOM_UNIT_MICROSEC },
+	{ atom_op_calltable, 0 },
+	{ atom_op_repeat, 0 },
+	{ atom_op_clear, ATOM_ARG_REG },
+	{ atom_op_clear, ATOM_ARG_PS },
+	{ atom_op_clear, ATOM_ARG_WS },
+	{ atom_op_clear, ATOM_ARG_FB },
+	{ atom_op_clear, ATOM_ARG_PLL },
+	{ atom_op_clear, ATOM_ARG_MC },
+	{ atom_op_nop, 0 },
+	{ atom_op_eot, 0 },
+	{ atom_op_mask, ATOM_ARG_REG },
+	{ atom_op_mask, ATOM_ARG_PS },
+	{ atom_op_mask, ATOM_ARG_WS },
+	{ atom_op_mask, ATOM_ARG_FB },
+	{ atom_op_mask, ATOM_ARG_PLL },
+	{ atom_op_mask, ATOM_ARG_MC },
+	{ atom_op_postcard, 0 },
+	{ atom_op_beep, 0 },
+	{ atom_op_savereg, 0 },
+	{ atom_op_restorereg, 0 },
+	{ atom_op_setdatablock, 0 },
+	{ atom_op_xor, ATOM_ARG_REG },
+	{ atom_op_xor, ATOM_ARG_PS },
+	{ atom_op_xor, ATOM_ARG_WS },
+	{ atom_op_xor, ATOM_ARG_FB },
+	{ atom_op_xor, ATOM_ARG_PLL },
+	{ atom_op_xor, ATOM_ARG_MC },
+	{ atom_op_shl, ATOM_ARG_REG },
+	{ atom_op_shl, ATOM_ARG_PS },
+	{ atom_op_shl, ATOM_ARG_WS },
+	{ atom_op_shl, ATOM_ARG_FB },
+	{ atom_op_shl, ATOM_ARG_PLL },
+	{ atom_op_shl, ATOM_ARG_MC },
+	{ atom_op_shr, ATOM_ARG_REG },
+	{ atom_op_shr, ATOM_ARG_PS },
+	{ atom_op_shr, ATOM_ARG_WS },
+	{ atom_op_shr, ATOM_ARG_FB },
+	{ atom_op_shr, ATOM_ARG_PLL },
+	{ atom_op_shr, ATOM_ARG_MC },
+	{ atom_op_debug, 0 },
+};
+
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int base = CU16(ctx->cmd_table + 4 + 2 * index);
+	int len, ws, ps, ptr;
+	unsigned char op;
+	atom_exec_context ectx;
+	int ret = 0;
+
+	if (!base)
+		return -EINVAL;
+
+	len = CU16(base + ATOM_CT_SIZE_PTR);
+	ws = CU8(base + ATOM_CT_WS_PTR);
+	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+	ptr = base + ATOM_CT_CODE_PTR;
+
+	ATOM_SDEBUG_PRINT(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+	ectx.ctx = ctx;
+	ectx.ps_shift = ps / 4;
+	ectx.start = base;
+	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
+	if (ws)
+		ectx.ws = malloc(4 * ws, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	else
+		ectx.ws = NULL;
+
+	debug_depth++;
+	while (1) {
+		op = CU8(ptr++);
+		if (op < ATOM_OP_NAMES_CNT)
+			ATOM_SDEBUG_PRINT("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+		else
+			ATOM_SDEBUG_PRINT("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				base, len, ws, ps, ptr - 1);
+			ret = -EINVAL;
+			goto free;
+		}
+
+		if (op < ATOM_OP_CNT && op > 0)
+			opcode_table[op].func(&ectx, &ptr,
+					      opcode_table[op].arg);
+		else
+			break;
+
+		if (op == ATOM_OP_EOT)
+			break;
+	}
+	debug_depth--;
+	ATOM_SDEBUG_PRINT("<<\n");
+
+free:
+	if (ws)
+		free(ectx.ws, DRM_MEM_DRIVER);
+	return ret;
+}
+
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int r;
+
+	sx_xlock(&ctx->mutex);
+	/* reset reg block */
+	ctx->reg_block = 0;
+	/* reset fb window */
+	ctx->fb_base = 0;
+	/* reset io mode */
+	ctx->io_mode = ATOM_IO_MM;
+	r = atom_execute_table_locked(ctx, index, params);
+	sx_xunlock(&ctx->mutex);
+	return r;
+}
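+
+/*
+ * Editor's note -- illustrative sketch, not part of the upstream source:
+ * callers fill a small parameter-space array and hand it to a command table
+ * by index, e.g. (hypothetical target clock):
+ *
+ *	uint32_t ps[16];
+ *
+ *	memset(ps, 0, sizeof(ps));
+ *	ps[0] = cpu_to_le32(60000);	// target SCLK in 10 kHz units
+ *	atom_execute_table(ctx, ATOM_CMD_SETSCLK, ps);
+ *
+ * atom_asic_init() below uses this pattern for ATOM_CMD_INIT.
+ */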
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
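+/*
+ * Build a 256-entry index of indirect-IO (IIO) programs: each
+ * ATOM_IIO_START block is recorded under its port id, then skipped to its
+ * ATOM_IIO_END using the per-opcode lengths in atom_iio_len above.
+ */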
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+	ctx->iio = malloc(2 * 256, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	while (CU8(base) == ATOM_IIO_START) {
+		ctx->iio[CU8(base + 1)] = base + 2;
+		base += 2;
+		while (CU8(base) != ATOM_IIO_END)
+			base += atom_iio_len[CU8(base)];
+		base += 3;
+	}
+}
+
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+	int base;
+	struct atom_context *ctx =
+	    malloc(sizeof(struct atom_context), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	char *str;
+	char name[512];
+	int i;
+
+	if (!ctx)
+		return NULL;
+
+	ctx->card = card;
+	ctx->bios = bios;
+
+	if (CU16(0) != ATOM_BIOS_MAGIC) {
+		DRM_INFO("Invalid BIOS magic.\n");
+		free(ctx, DRM_MEM_DRIVER);
+		return NULL;
+	}
+	if (strncmp
+	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+	     strlen(ATOM_ATI_MAGIC))) {
+		DRM_INFO("Invalid ATI magic.\n");
+		free(ctx, DRM_MEM_DRIVER);
+		return NULL;
+	}
+
+	base = CU16(ATOM_ROM_TABLE_PTR);
+	if (strncmp
+	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+	     strlen(ATOM_ROM_MAGIC))) {
+		DRM_INFO("Invalid ATOM magic.\n");
+		free(ctx, DRM_MEM_DRIVER);
+		return NULL;
+	}
+
+	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+
+	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+	while (*str && ((*str == '\n') || (*str == '\r')))
+		str++;
+	/* name string isn't always 0 terminated */
+	for (i = 0; i < 511; i++) {
+		name[i] = str[i];
+		if (name[i] < '.' || name[i] > 'z') {
+			name[i] = 0;
+			break;
+		}
+	}
+	DRM_INFO("ATOM BIOS: %s\n", name);
+
+	return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+	uint32_t ps[16];
+	int ret;
+
+	memset(ps, 0, 64);
+
+	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+	if (!ps[0] || !ps[1])
+		return 1;
+
+	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+		return 1;
+	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, 64);
+
+	if (rdev->family < CHIP_R600) {
+		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+	}
+	return ret;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+	if (ctx->iio)
+		free(ctx->iio, DRM_MEM_DRIVER);
+	free(ctx, DRM_MEM_DRIVER);
+}
+
+bool atom_parse_data_header(struct atom_context *ctx, int index,
+			    uint16_t *size, uint8_t *frev, uint8_t *crev,
+			    uint16_t *data_start)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)((char *)ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
+
+	if (size)
+		*size = CU16(idx);
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	*data_start = idx;
+	return true;
+}
+
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+			   uint8_t *crev)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)((char *)ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
+
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	return true;
+}
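+
+/*
+ * Editor's note -- illustrative sketch, not upstream code: drivers normally
+ * probe a table's format/content revision before picking a parameter
+ * layout:
+ *
+ *	uint8_t frev, crev;
+ *
+ *	if (atom_parse_cmd_header(ctx, index, &frev, &crev)) {
+ *		// choose the parameter struct version from frev/crev
+ *	}
+ */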
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes = 0;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)((char *)ctx->bios + data_offset);
+
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+	}
+	ctx->scratch_size_bytes = 0;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = malloc(usage_bytes, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	ctx->scratch_size_bytes = usage_bytes;
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/atom.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atom.h
===================================================================
--- trunk/sys/dev/drm2/radeon/atom.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atom.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,162 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atom.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <dev/drm2/drmP.h>
+
+#define ATOM_BIOS_MAGIC		0xAA55
+#define ATOM_ATI_MAGIC_PTR	0x30
+#define ATOM_ATI_MAGIC		" 761295520"
+#define ATOM_ROM_TABLE_PTR	0x48
+
+#define ATOM_ROM_MAGIC		"ATOM"
+#define ATOM_ROM_MAGIC_PTR	4
+
+#define ATOM_ROM_MSG_PTR	0x10
+#define ATOM_ROM_CMD_PTR	0x1E
+#define ATOM_ROM_DATA_PTR	0x20
+
+#define ATOM_CMD_INIT		0
+#define ATOM_CMD_SETSCLK	0x0A
+#define ATOM_CMD_SETMCLK	0x0B
+#define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
+
+#define ATOM_DATA_FWI_PTR	0xC
+#define ATOM_DATA_IIO_PTR	0x32
+
+#define ATOM_FWI_DEFSCLK_PTR	8
+#define ATOM_FWI_DEFMCLK_PTR	0xC
+#define ATOM_FWI_MAXSCLK_PTR	0x24
+#define ATOM_FWI_MAXMCLK_PTR	0x28
+
+#define ATOM_CT_SIZE_PTR	0
+#define ATOM_CT_WS_PTR		4
+#define ATOM_CT_PS_PTR		5
+#define ATOM_CT_PS_MASK		0x7F
+#define ATOM_CT_CODE_PTR	6
+
+#define ATOM_OP_CNT		123
+#define ATOM_OP_EOT		91
+
+#define ATOM_CASE_MAGIC		0x63
+#define ATOM_CASE_END		0x5A5A
+
+#define ATOM_ARG_REG		0
+#define ATOM_ARG_PS		1
+#define ATOM_ARG_WS		2
+#define ATOM_ARG_FB		3
+#define ATOM_ARG_ID		4
+#define ATOM_ARG_IMM		5
+#define ATOM_ARG_PLL		6
+#define ATOM_ARG_MC		7
+
+#define ATOM_SRC_DWORD		0
+#define ATOM_SRC_WORD0		1
+#define ATOM_SRC_WORD8		2
+#define ATOM_SRC_WORD16		3
+#define ATOM_SRC_BYTE0		4
+#define ATOM_SRC_BYTE8		5
+#define ATOM_SRC_BYTE16		6
+#define ATOM_SRC_BYTE24		7
+
+#define ATOM_WS_QUOTIENT	0x40
+#define ATOM_WS_REMAINDER	0x41
+#define ATOM_WS_DATAPTR		0x42
+#define ATOM_WS_SHIFT		0x43
+#define ATOM_WS_OR_MASK		0x44
+#define ATOM_WS_AND_MASK	0x45
+#define ATOM_WS_FB_WINDOW	0x46
+#define ATOM_WS_ATTRIBUTES	0x47
+#define ATOM_WS_REGPTR  	0x48
+
+#define ATOM_IIO_NOP		0
+#define ATOM_IIO_START		1
+#define ATOM_IIO_READ		2
+#define ATOM_IIO_WRITE		3
+#define ATOM_IIO_CLEAR		4
+#define ATOM_IIO_SET		5
+#define ATOM_IIO_MOVE_INDEX	6
+#define ATOM_IIO_MOVE_ATTR	7
+#define ATOM_IIO_MOVE_DATA	8
+#define ATOM_IIO_END		9
+
+#define ATOM_IO_MM		0
+#define ATOM_IO_PCI		1
+#define ATOM_IO_SYSIO		2
+#define ATOM_IO_IIO		0x80
+
+struct card_info {
+	struct drm_device *dev;
+	void (* reg_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+        uint32_t (* reg_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* ioreg_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+        uint32_t (* ioreg_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* mc_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+        uint32_t (* mc_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* pll_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+        uint32_t (* pll_read)(struct card_info *, uint32_t);          /*  filled by driver */
+};
+
+struct atom_context {
+	struct card_info *card;
+	struct sx mutex;
+	void *bios;
+	uint32_t cmd_table, data_table;
+	uint16_t *iio;
+
+	uint16_t data_block;
+	uint32_t fb_base;
+	uint32_t divmul[2];
+	uint16_t io_attr;
+	uint16_t reg_block;
+	uint8_t shift;
+	int cs_equal, cs_above;
+	int io_mode;
+	uint32_t *scratch;
+	int scratch_size_bytes;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+			    uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+			   uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/atom.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atombios.h
===================================================================
--- trunk/sys/dev/drm2/radeon/atombios.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atombios.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,8014 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.  
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atombios.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+
+/****************************************************************************/	
+/*Portion I: Definitions  shared between VBIOS and Driver                   */
+/****************************************************************************/
+
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR                   0x00020000
+#define ATOM_VERSION_MINOR                   0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified before inclusion:
+ * define ATOM_BIG_ENDIAN to 0 (little endian) or 1 (big endian).
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+  #ifndef ULONG 
+    typedef unsigned long ULONG;
+  #endif
+
+  #ifndef UCHAR
+    typedef unsigned char UCHAR;
+  #endif
+
+  #ifndef USHORT 
+    typedef unsigned short USHORT;
+  #endif
+#endif
+      
+#define ATOM_DAC_A            0 
+#define ATOM_DAC_B            1
+#define ATOM_EXT_DAC          2
+
+#define ATOM_CRTC1            0
+#define ATOM_CRTC2            1
+#define ATOM_CRTC3            2
+#define ATOM_CRTC4            3
+#define ATOM_CRTC5            4
+#define ATOM_CRTC6            5
+#define ATOM_CRTC_INVALID     0xFF
+
+#define ATOM_DIGA             0
+#define ATOM_DIGB             1
+
+#define ATOM_PPLL1            0
+#define ATOM_PPLL2            1
+#define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
+#define ATOM_PPLL_INVALID     0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL       0       
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
+#define ATOM_SCALER1          0
+#define ATOM_SCALER2          1
+
+#define ATOM_SCALER_DISABLE   0   
+#define ATOM_SCALER_CENTER    1   
+#define ATOM_SCALER_EXPANSION 2   
+#define ATOM_SCALER_MULTI_EX  3   
+
+#define ATOM_DISABLE          0
+#define ATOM_ENABLE           1
+#define ATOM_LCD_BLOFF                          (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON                           (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL          (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START                 (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP                  (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT                       (ATOM_DISABLE+7)
+#define ATOM_INIT                               (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS                         (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING         1
+#define ATOM_BLANKING_OFF     0
+
+#define ATOM_CURSOR1          0
+#define ATOM_CURSOR2          1
+
+#define ATOM_ICON1            0
+#define ATOM_ICON2            1
+
+#define ATOM_CRT1             0
+#define ATOM_CRT2             1
+
+#define ATOM_TV_NTSC          1
+#define ATOM_TV_NTSCJ         2
+#define ATOM_TV_PAL           3
+#define ATOM_TV_PALM          4
+#define ATOM_TV_PALCN         5
+#define ATOM_TV_PALN          6
+#define ATOM_TV_PAL60         7
+#define ATOM_TV_SECAM         8
+#define ATOM_TV_CV            16
+
+#define ATOM_DAC1_PS2         1
+#define ATOM_DAC1_CV          2
+#define ATOM_DAC1_NTSC        3
+#define ATOM_DAC1_PAL         4
+
+#define ATOM_DAC2_PS2         ATOM_DAC1_PS2
+#define ATOM_DAC2_CV          ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC        ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL         ATOM_DAC1_PAL
+ 
+#define ATOM_PM_ON            0
+#define ATOM_PM_STANDBY       1
+#define ATOM_PM_SUSPEND       2
+#define ATOM_PM_OFF           3
+
+/* Bit0:{=0:single, =1:dual},
+   Bit1 {=0:666RGB, =1:888RGB},
+   Bit2:3:{Grey level}
+   Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
+
+#define ATOM_PANEL_MISC_DUAL               0x00000001
+#define ATOM_PANEL_MISC_888RGB             0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL         0x0000000C
+#define ATOM_PANEL_MISC_FPDI               0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT   2
+#define ATOM_PANEL_MISC_SPATIAL            0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL           0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED        0x00000080
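+
+// Editor's note (illustrative): the two grey-level bits decode as
+//   grey = (misc & ATOM_PANEL_MISC_GREY_LEVEL) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;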
+
+
+#define MEMTYPE_DDR1              "DDR1"
+#define MEMTYPE_DDR2              "DDR2"
+#define MEMTYPE_DDR3              "DDR3"
+#define MEMTYPE_DDR4              "DDR4"
+
+#define ASIC_BUS_TYPE_PCI         "PCI"
+#define ASIC_BUS_TYPE_AGP         "AGP"
+#define ASIC_BUS_TYPE_PCIE        "PCI_EXPRESS"
+
+/* Maximum size of the FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING     "FGL"             //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING  3        //sizeof( ATOM_FIREGL_FLAG_STRING )
+
+#define ATOM_FAKE_DESKTOP_STRING    "DSK"             //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING  ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 
+
+#define ATOM_M54T_FLAG_STRING       "M54T"            //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING    4        //sizeof( ATOM_M54T_FLAG_STRING )
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE          2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS          1
+
+#pragma pack(1)                                       /* BIOS data must use byte alignment */
+
+/*  Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER		0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE				    0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE    0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE   20    /* including the terminator 0x0! */
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER		0x002f
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_START		0x006e
+
+/* Common header for all ROM Data tables.
+  Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header,
+  and the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+  USHORT usStructureSize;
+  UCHAR  ucTableFormatRevision;   /*Change it when the Parser is not backward compatible */
+  UCHAR  ucTableContentRevision;  /*Change it only when the table needs to change but the firmware */
+                                  /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structure stores the ROM header.
+/****************************************************************************/	
+typedef struct _ATOM_ROM_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  UCHAR	 uaFirmWareSignature[4];    /*Signature to distinguish between Atombios and non-atombios, 
+                                      atombios should init it as "ATOM", don't change the position */
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset; 
+  USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+  USHORT usMasterDataTableOffset;   /*Offset for SW to get all data table offsets, Don't change the position */
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+}ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef	UEFI_BUILD
+	#define	UTEMP	USHORT
+	#define	USHORT	void*
+#endif
+
+/****************************************************************************/	
+// Structures used in Command.mtb 
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+  USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
+  USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
+  USHORT ASIC_RegistersInit;                     //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT VRAM_BlockVenderDetection;              //Atomic Table,  used only by Bios
+  USHORT DIGxEncoderControl;										 //Only used by Bios
+  USHORT MemoryControllerInit;                   //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT EnableCRTCMemReq;                       //Function Table,directly used by various SW components,latest version 2.1
+  USHORT MemoryParamAdjust; 										 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock if needed
+  USHORT DVOEncoderControl;                      //Function Table,directly used by various SW components,latest version 1.2
+  USHORT GPIOPinControl;												 //Atomic Table,  only used by Bios
+  USHORT SetEngineClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetMemoryClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetPixelClock;                          //Function Table,directly used by various SW components,latest version 1.2  
+  USHORT EnableDispPowerGating;                  //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT ResetMemoryDLL;                         //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ResetMemoryDevice;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryPLLInit;                          //Atomic Table,  used only by Bios
+  USHORT AdjustDisplayPll;                       //Atomic Table,  used by various SW components.
+  USHORT AdjustMemoryController;                 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock                
+  USHORT EnableASIC_StaticPwrMgt;                //Atomic Table,  only used by Bios
+  USHORT ASIC_StaticPwrMgtStatusChange;          //Obsolete ,     only used by Bios   
+  USHORT DAC_LoadDetection;                      //Atomic Table,  directly used by various SW components,latest version 1.2  
+  USHORT LVTMAEncoderControl;                    //Atomic Table,directly used by various SW components,latest version 1.3
+  USHORT HW_Misc_Operation;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DAC1EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1  
+  USHORT DAC2EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DVOOutputControl;                       //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT CV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT GetConditionalGoldenSetting;            //Only used by Bios
+  USHORT TVEncoderControl;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT PatchMCSetting;                         //only used by BIOS
+  USHORT MC_SEQ_Control;                         //only used by BIOS
+  USHORT TV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT EnableScaler;                           //Atomic Table,  used only by Bios
+  USHORT BlankCRTC;                              //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableCRTC;                             //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetPixelClock;                          //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableVGA_Render;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT GetSCLKOverMCLKRatio;                   //Atomic Table,  only used by Bios
+  USHORT SetCRTC_Timing;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetCRTC_OverScan;                       //Atomic Table,  used by various SW components,latest version 1.1 
+  USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
+  USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
+  USHORT UpdateCRTC_DoubleBufferRegisters;			 //Atomic Table,  used only by Bios
+  USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
+  USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
+  USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetEngineClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT SetCRTC_UsingDTDTiming;                 //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ExternalEncoderControl;                 //Atomic Table,  directly used by various SW components,latest version 2.1
+  USHORT LVTMAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT VRAM_BlockDetectionByStrap;             //Atomic Table,  used only by Bios
+  USHORT MemoryCleanUp;                          //Atomic Table,  only used by Bios    
+  USHORT ProcessI2cChannelTransaction;           //Function Table,only used by Bios
+  USHORT WriteOneByteToHWAssistedI2C;            //Function Table,indirectly used by various SW components 
+  USHORT ReadHWAssistedI2CStatus;                //Atomic Table,  indirectly used by various SW components
+  USHORT SpeedFanControl;                        //Function Table,indirectly used by various SW components,called from ASIC_Init
+  USHORT PowerConnectorDetection;                //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT MC_Synchronization;                     //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ComputeMemoryEnginePLL;                 //Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock
+  USHORT MemoryRefreshConversion;                //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
+  USHORT VRAM_GetCurrentInfoBlock;               //Atomic Table,  used only by Bios
+  USHORT DynamicMemorySettings;                  //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryTraining;                         //Atomic Table,  used only by Bios
+  USHORT EnableSpreadSpectrumOnPPLL;             //Atomic Table,  directly used by various SW components,latest version 1.2
+  USHORT TMDSAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetVoltage;                             //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+  USHORT DAC1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DAC2OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ComputeMemoryClockParam;                //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ClockSource;                            //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT MemoryDeviceInit;                       //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT GetDispObjectInfo;                      //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
+  USHORT DIG1EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG1TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2TransmitterControl;	               //Atomic Table,directly used by various SW components,latest version 1.1 
+  USHORT ProcessAuxChannelTransaction;					 //Function Table,only used by Bios
+  USHORT DPEncoderService;											 //Function Table,only used by Bios
+  USHORT GetVoltageInfo;                         //Function Table,only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;   
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
+#define DPTranslatorControl                      DIG2EncoderControl
+#define UNIPHYTransmitterControl			     DIG1TransmitterControl
+#define LVTMATransmitterControl				     DIG2TransmitterControl
+#define SetCRTC_DPM_State                        GetConditionalGoldenSetting
+#define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService                      ReadHWAssistedI2CStatus
+#define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define EnableYUV                                GetDispObjectInfo                         
+#define DynamicClockGating                       EnableDispPowerGating
+#define SetupHWAssistedI2CStatus                 ComputeMemoryClockParam
+
+#define TMDSAEncoderControl                      PatchMCSetting
+#define LVDSEncoderControl                       MC_SEQ_Control
+#define LCD1OutputControl                        HW_Misc_Operation
+
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER           sHeader;
+  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/	
+// Structures used in every command table
+/****************************************************************************/	
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+#else
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+  ATOM_TABLE_ATTRIBUTE sbfAccess;
+  USHORT               susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/	
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. 
+// And the pointer actually points to this header.
+/****************************************************************************/	
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER CommonHeader;
+  ATOM_TABLE_ATTRIBUTE     TableAttribute;	
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/	
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/	
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/	
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+  ULONG   ulClock;        //When returned, it's the re-calculated clock based on given Fb_div, Post_Div and ref_div
+  UCHAR   ucAction;       //0:reserved //1:Memory //2:Engine  
+  UCHAR   ucReserved;     //may expand to return larger Fbdiv later
+  UCHAR   ucFbDiv;        //return value
+  UCHAR   ucPostDiv;      //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+  ULONG   ulClock;        //On return, [23:0] holds the real clock
+  UCHAR   ucAction;       //0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to the register
+  USHORT  usFbDiv;		    //return Feedback value to be written to register
+  UCHAR   ucPostDiv;      //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+
+#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF  //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK                  0x01000000  //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000	//Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000  //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK									0x08000000	//Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL											0x10000000	//Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK  USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                  0x01       //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH                 0x02	     //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04       //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK									0x08       //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL											0x10			 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+#else
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
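+
+// Editor's note (illustrative): a 600 MHz request packs as
+//   ulClockFreq = 60000 (10 kHz units), ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM.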
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+  USHORT usFbDivFrac;  
+  USHORT usFbDiv;  
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  UCHAR   ucCntlFlag;                         //Output Parameter      
+  UCHAR   ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9						8
+
+
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucPostDiv:8;        //return parameter: post divider, programmed directly to the register
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+#else
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+  ULONG  ucPostDiv:8;        //return parameter: post divider, programmed directly to the register
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;                       
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
+// use for ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG  ulClock;         
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output:UPPER_WORD=FB_DIV_INTEGER,  LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR   ucDllSpeed;                         //Output 
+  UCHAR   ucPostDiv;                          //Output
+  union{
+    UCHAR   ucInputFlag;                      //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR   ucPllCntlFlag;                    //Output: 
+  };
+  UCHAR   ucBWCntl;                       
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03 
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulMemoryClock;
+  ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/	
+// Structures used by SetEngineClockTable
+/****************************************************************************/	
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by SetMemoryClockTable
+/****************************************************************************/	
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/	
+typedef struct _ASIC_INIT_PARAMETERS
+{
+  ULONG ulDefaultEngineClock;         //In 10Khz unit
+  ULONG ulDefaultMemoryClock;         //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+  ASIC_INIT_PARAMETERS sASICInitClocks;
+  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/	
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS 
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
+/****************************************************************************/	
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+  USHORT usDeviceID;                  //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+  UCHAR  ucDacType;                   //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+  UCHAR  ucMisc;											//Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb						0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+  DAC_LOAD_DETECTION_PARAMETERS            sDacload;
+  ULONG                                    Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS 
+{
+  USHORT usPixelClock;                // in 10 kHz; for BIOS convenience
+  UCHAR  ucDacStandard;               // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+                                      // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DIG1EncoderControlTable
+//                    DIG2EncoderControlTable
+//                    ExternalEncoderControlTable
+/****************************************************************************/	
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  UCHAR  ucConfig;
+                            // [2] Link Select:
+                            // =0: PHY linkA if bfLanes<3
+                            // =1: PHY linkB if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [3] Transmitter Sel
+                            // =0: UNIPHY or PCIEPHY
+                            // =1: LVTMA
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION			  DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER			DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ		0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B							  ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A							  ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK	0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY							  0x00
+#define ATOM_ENCODER_CONFIG_LVTMA								  0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1				  0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2				  0x08
+#define ATOM_ENCODER_CONFIG_DIGB								  0x80			// VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE:  Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP											0
+#define ATOM_ENCODER_MODE_LVDS										1
+#define ATOM_ENCODER_MODE_DVI											2
+#define ATOM_ENCODER_MODE_HDMI										3
+#define ATOM_ENCODER_MODE_SDVO										4
+#define ATOM_ENCODER_MODE_DP_AUDIO                5
+#define ATOM_ENCODER_MODE_TV											13
+#define ATOM_ENCODER_MODE_CV											14
+#define ATOM_ENCODER_MODE_CRT											15
+#define ATOM_ENCODER_MODE_DVO											16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
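+
+/* Illustrative example (hypothetical helper): how the ucConfig and
+ * ucEncoderMode values above combine when filling
+ * DIG_ENCODER_CONTROL_PARAMETERS, here for an HDMI encoder on PHY link B
+ * driven by the LVTMA transmitter. */
+static inline void
+atom_example_dig_encoder_on(DIG_ENCODER_CONTROL_PARAMETERS *p, USHORT pclk10khz)
+{
+  p->usPixelClock = pclk10khz;  /* in 10 kHz units */
+  p->ucConfig = ATOM_ENCODER_CONFIG_LINKB | ATOM_ENCODER_CONFIG_LVTMA;
+  p->ucAction = 1;              /* =1: turn on encoder */
+  p->ucEncoderMode = ATOM_ENCODER_MODE_HDMI;
+  p->ucLaneNum = 4;
+}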
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:2;
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucReserved:1;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
+    UCHAR ucReserved:1;
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+  UCHAR ucAction;                                       
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucStatus;           // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK	  0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1				    0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2				    0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3				    0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS    0x0e
+#define ATOM_ENCODER_CMD_SETUP                        0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE             0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
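+
+/* Illustrative sketch (the ordering is driver policy, not mandated by this
+ * header): the DP link-training commands above are typically issued in this
+ * order, polling ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS for
+ * ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE before the final step. */
+static const UCHAR atom_example_dp_training_order[] = {
+  ATOM_ENCODER_CMD_DP_LINK_TRAINING_START,
+  ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1,
+  ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2,
+  ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE,
+};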
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// For the following function, the ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:3;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
+    UCHAR ucReserved:3;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER					  0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI           
+// For the following function, the ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz    <= changed compared to previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz    <= changed compared to previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ		  0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ		  0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER					  0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER					  0x60
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  union{
+  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;           // HPD ID (1-6). =0 means to skip HPD programming. New compared to previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor: 
+#define PANEL_BPC_UNDEFINE                               0x00
+#define PANEL_6BIT_PER_COLOR                             0x01 
+#define PANEL_8BIT_PER_COLOR                             0x02
+#define PANEL_10BIT_PER_COLOR                            0x03
+#define PANEL_12BIT_PER_COLOR                            0x04
+#define PANEL_16BIT_PER_COLOR                            0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE                   0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE                  0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE                  0x11
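+
+/* Illustrative example (hypothetical helper): filling
+ * DIG_ENCODER_CONTROL_PARAMETERS_V4 through its union members for a 4-lane
+ * DP setup at the 2.70 GHz link rate on the DIG1 front end, 8 bits per
+ * color. */
+static inline void
+atom_example_dig_encoder_setup_v4(DIG_ENCODER_CONTROL_PARAMETERS_V4 *p,
+    USHORT pclk10khz)
+{
+  p->usPixelClock = pclk10khz;
+  p->ucConfig = ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ |
+      ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER;
+  p->ucAction = ATOM_ENCODER_CMD_SETUP;
+  p->ucEncoderMode = ATOM_ENCODER_MODE_DP;
+  p->ucLaneNum = 4;
+  p->ucBitPerColor = PANEL_8BIT_PER_COLOR;
+  p->ucHPD_ID = 1;          /* HPD pin 1; 0 would skip HPD programming */
+}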
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable
+//                    LVTMATransmitterControlTable
+//                    DVOOutputControlTable
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE
+{
+  UCHAR ucLaneSel;
+  UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+	union
+	{
+  USHORT usPixelClock;    // in 10 kHz; for BIOS convenience
+  USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits hold the connector type defined in objectid.h
+  ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+  };
+  UCHAR ucConfig;
+                          // [0]=0: 4 lane Link
+                          //    =1: 8 lane Link ( Dual Links TMDS )
+                          // [1]=0: InCoherent mode
+                          //    =1: Coherent Mode
+                          // [2] Link Select:
+                          // =0: PHY linkA   if bfLanes<3
+                          // =1: PHY linkB   if bfLanes<3
+                          // =0: PHY linkA+B if bfLanes=3
+                          // [5:4] PCIE lane Sel
+                          // =0: lane 0~3 or 0~7
+                          // =1: lane 4~7
+                          // =2: lane 8~11 or 8~15
+                          // =3: lane 12~15
+  UCHAR ucAction;         // =0: turn off encoder
+                          // =1: turn on encoder
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION		DIG_TRANSMITTER_CONTROL_PARAMETERS					
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK	0x00ff			
+
+//ucConfig 
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK			0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT				0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK		0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA						0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB						0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B					0x00			
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A					0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK	0x08			// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER		0x00				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER		0x08				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK			0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL			0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE			0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN		0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK		0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7				0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15			0xc0
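+
+/* Illustrative note: a dual-link TMDS ucConfig built from the bits above
+ * would OR together ATOM_TRANSMITTER_CONFIG_8LANE_LINK,
+ * ATOM_TRANSMITTER_CONFIG_COHERENT and an encoder selection such as
+ * ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER. */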
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE					       0
+#define ATOM_TRANSMITTER_ACTION_ENABLE					       1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF				       2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON				       3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START		 5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP			 6
+#define ATOM_TRANSMITTER_ACTION_INIT						       7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT	       8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT		       9
+#define ATOM_TRANSMITTER_ACTION_SETUP						       10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON               12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF              13
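+
+/* Illustrative note (driver policy, not mandated by this header): bringing a
+ * DIG transmitter up typically walks ATOM_TRANSMITTER_ACTION_INIT, then
+ * _SETUP, then _ENABLE and _ENABLE_OUTPUT; teardown reverses this with
+ * _DISABLE_OUTPUT and _DISABLE. */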
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucReserved:1;               
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: None DP connector
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: None DP connector
+  UCHAR ucReserved:1;               
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER		          0x00				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER		          0x08				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR			        0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3           	0x80	//EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+	union
+	{
+  USHORT usPixelClock;    // in 10 kHz; for BIOS convenience
+  USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits hold the connector type defined in objectid.h
+  ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+  UCHAR ucAction;         // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+	union
+	{
+    USHORT usPixelClock;    // in 10 kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+  UCHAR ucAction;           // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER		          0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER		          0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL          		        0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL		                  0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT            0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           	0x80	//EF
+
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+ 	union
+ 	{  
+ 	  UCHAR ucLaneSet;
+ 	  struct {
+#if ATOM_BIG_ENDIAN
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+ 		};
+ 	}; 
+}ATOM_DP_VS_MODE_V4;
+ 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;       // in 10 kHz; for BIOS convenience
+    USHORT usInitInfo;         // when initializing UNIPHY, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode; redefined compared to previous version
+  };
+  union
+  {
+  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;           // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR			0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT				          0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA  			            0x00			
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB				            0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER		          0x00				 
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER		          0x08				
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL		                0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL		                0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3           	0x80	//EF
+
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;                // encoder clock in 10 kHz: (DP mode) = link clock/10; (TMDS/LVDS/HDMI) = pixel clock; (HDMI deep color) = pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;                   // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF
+  UCHAR  ucAction;                  // define as ATOM_TRANSMITTER_ACTION_xxx
+  UCHAR  ucLaneNum;                 // indicate lane number 1-8
+  UCHAR  ucConnObjId;               // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;                 // indicate DIG mode
+  union{
+  ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR  ucDigEncoderSel;           // indicate DIG front end encoder 
+  UCHAR  ucDPLaneSet;
+  UCHAR  ucReserved;
+  UCHAR  ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                                 0  
+#define ATOM_PHY_ID_UNIPHYB                                 1
+#define ATOM_PHY_ID_UNIPHYC                                 2
+#define ATOM_PHY_ID_UNIPHYD                                 3
+#define ATOM_PHY_ID_UNIPHYE                                 4
+#define ATOM_PHY_ID_UNIPHYF                                 5
+#define ATOM_PHY_ID_UNIPHYG                                 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS                    1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO                    4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST                  5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                               0x00
+#define DP_LANE_SET__0DB_0_6V                               0x01
+#define DP_LANE_SET__0DB_0_8V                               0x02
+#define DP_LANE_SET__0DB_1_2V                               0x03
+#define DP_LANE_SET__3_5DB_0_4V                             0x08  
+#define DP_LANE_SET__3_5DB_0_6V                             0x09
+#define DP_LANE_SET__3_5DB_0_8V                             0x0a
+#define DP_LANE_SET__6DB_0_4V                               0x10
+#define DP_LANE_SET__6DB_0_6V                               0x11
+#define DP_LANE_SET__9_5DB_0_4V                             0x18  
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT				          0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 	        0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT		    0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL		                0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL		                0x08   
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT           0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK		          0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT		      0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL				        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL				          0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL				          0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL				          0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL				          0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL				          0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL				          0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+
+
+/****************************************************************************/	
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10 kHz, valid when ucAction=SETUP/ENABLE_OUTPUT
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucAction;          // 
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncoderMode = DP
+  UCHAR  ucReserved;        
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP              0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ		  0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK		    0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1		            0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2		            0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3		            0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
+/****************************************************************************/	
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DEC30)
+//                    TMDSAOutputControlTable  (Before DEC30)
+/****************************************************************************/	
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR  ucAction;                    // Possible input: ATOM_ENABLE || ATOM_DISABLE
+                                      // When the display is LCD, in addition to the above:
+                                      // ATOM_LCD_BLOFF || ATOM_LCD_BLON || ATOM_LCD_BL_BRIGHTNESS_CONTROL || ATOM_LCD_SELFTEST_START ||
+                                      // ATOM_LCD_SELFTEST_STOP
+                                      
+  UCHAR  aucPadding[3];               // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3	 DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by BlankCRTCTable
+/****************************************************************************/	
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+  UCHAR  ucCRTC;                    	// ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucBlanking;                  // ATOM_BLANKING or ATOM_BLANKINGOFF
+  USHORT usBlackColorRCr;
+  USHORT usBlackColorGY;
+  USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION    BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by EnableCRTCTable
+//                    EnableCRTCMemReqTable
+//                    UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/	
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE 
+  UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION   ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+  USHORT usOverscanRight;             // right
+  USHORT usOverscanLeft;              // left
+  USHORT usOverscanBottom;            // bottom
+  USHORT usOverscanTop;               // top
+  UCHAR  ucCRTC;                      // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+  UCHAR ucH_Replication;              // horizontal replication
+  UCHAR ucV_Replication;              // vertical replication
+  UCHAR usCRTC;                       // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/	
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucDevice;                     // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+  UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION  SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEncoderID;                  // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+  UCHAR ucEncodeMode;									// Encoding mode, only valid when using DIG1/DIG2/DVO
+  UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID    						0x00 
+//#define ASIC_INT_TV_ENCODER_ID									0x02
+//#define ASIC_INT_DIG1_ENCODER_ID								0x03
+//#define ASIC_INT_DAC2_ENCODER_ID								0x04
+//#define ASIC_EXT_TV_ENCODER_ID									0x06
+//#define ASIC_INT_DVO_ENCODER_ID									0x07
+//#define ASIC_INT_DIG2_ENCODER_ID								0x09
+//#define ASIC_EXT_DIG_ENCODER_ID									0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP										0
+//#define ATOM_ENCODER_MODE_LVDS									1
+//#define ATOM_ENCODER_MODE_DVI										2
+//#define ATOM_ENCODER_MODE_HDMI									3
+//#define ATOM_ENCODER_MODE_SDVO									4
+//#define ATOM_ENCODER_MODE_TV										13
+//#define ATOM_ENCODER_MODE_CV										14
+//#define ATOM_ENCODER_MODE_CRT										15
+
+/****************************************************************************/	
+// Structures used by SetPixelClockTable
+//                    GetPixelClockTable 
+/****************************************************************************/	
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10 kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
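+
+/* Worked example of the formula above: with a 27 MHz reference clock
+ * (RefClk = 2700 in 10 kHz units), usRefDiv = 2, usFbDiv = 100 and
+ * ucPostDiv = 5, usPixelClock = (2700*100)/(2*5) = 27000, i.e. a 270 MHz
+ * pixel clock. */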
+
+//Major revision=1, Minor revision=2, add ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK        0xF0
+#define MISC_DEVICE_INDEX_SHIFT       4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+  USHORT usPixelClock;                // in 10 kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucMiscInfo;                  // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1, Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV										13
+//ATOM_ENCODER_MODE_CV										14
+//ATOM_ENCODER_MODE_CRT										15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL							0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED						0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED						0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL						0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT							0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT						0x04
+//#define DVO_ENCODER_CONFIG_24BIT								0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL						0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE										0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK							0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1							0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2							0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK			0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC                    0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+  USHORT usPixelClock;                // in 10 kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL. For the VGA PPLL, make sure this value is not 0.
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucTransmitterId;             // graphic encoder id defined in objectId.h
+	union
+	{
+  UCHAR  ucEncoderMode;               // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+	UCHAR  ucDVOConfig;									// when use DVO, need to know SDR/DDR, 12bit or 24bit
+	};
+  UCHAR  ucMiscInfo;                  // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+                                      // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+                                      // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST			PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION		PIXEL_CLOCK_PARAMETERS_LAST
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+  UCHAR  ucCRTC;             // ATOM_CRTC1~6, indicate the CRTC controller to 
+                             // drive the pixel clock. not used for DCPLL case.
+  union{
+  UCHAR  ucReserved;
+  UCHAR  ucFracFbDiv;        // [gphan] temporary to prevent build problem.  remove it after driver code is changed.
+  };
+  USHORT usPixelClock;       // target the pixel clock to drive the CRTC timing
+                             // 0 means disable PPLL/DCPLL. 
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL
+                             // bit[1]= when VGA timing is used.
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL.
+                             // =0: XTALIN ( default mode )
+                             // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depending on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP              0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing.
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing.
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
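+
+/* Illustrative note: widening ulPixelClock to 24 bits lifts the ceiling of
+ * the earlier 16-bit usPixelClock fields, which top out at 65535 * 10 kHz
+ * (about 655 MHz). */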
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL
+                             // bit[1]= when VGA timing is used.
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL.
+                             // =0: XTALIN ( default mode )
+                             // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depending on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+  PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+  UCHAR  ucStatus;
+  UCHAR  ucRefDivSrc;                 // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+  UCHAR  ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+  PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/	
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/	
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+	USHORT usPixelClock;
+	UCHAR ucTransmitterID;
+	UCHAR ucEncodeMode;
+	union
+	{
+		UCHAR ucDVOConfig;									//if DVO, the link rate and whether the output is 12-bit low or 24-bit must be passed in
+		UCHAR ucConfig;											//if not DVO, not defined yet
+	};
+	UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION			ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+	USHORT usPixelClock;                    // target pixel clock
+	UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
+	UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+  UCHAR ucDispPllConfig;                  // display PLL configuration parameter, defined as DISPPLL_CONFIG_XXXX below
+  UCHAR ucExtTransmitterID;               // external encoder id.
+	UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL                0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED               0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED               0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL              0x000c     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT                0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT              0x0004     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT                   0x0008     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE                   0x0010     // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE               0x0020     // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK                   0x0040     // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+  ULONG ulDispPllFreq;                 // returns the display PPLL frequency used to generate the pixel clock (and the related idclk, symclk, etc.)
+  UCHAR ucRefDiv;                      // if non-zero, used to calculate the other PPLL parameters fb_divider and post_div ( if not given )
+  UCHAR ucPostDiv;                     // if non-zero, used to calculate the other PPLL parameter fb_divider
+  UCHAR ucReserved[2];  
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+  union 
+  {
+    ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3  sInput;
+    ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+  };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
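+
+/* Illustrative example (hypothetical helper; the table-execution call is
+ * assumed driver glue and named here only for illustration): the allocation
+ * above is an in/out union, so the caller fills sInput, runs the command
+ * table, then reads sOutput from the same storage. */
+static inline ULONG
+atom_example_adjust_display_pll(ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 *ps,
+    USHORT pclk10khz, UCHAR xmit_id, UCHAR mode)
+{
+  ps->sInput.usPixelClock = pclk10khz;
+  ps->sInput.ucTransmitterID = xmit_id;
+  ps->sInput.ucEncodeMode = mode;
+  ps->sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE;
+  /* ... atom_execute_table(ps) would run here (hypothetical) ... */
+  return ps->sOutput.ulDispPllFreq;
+}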
+
+/****************************************************************************/	
+// Structures used by EnableYUVTable
+/****************************************************************************/	
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+  UCHAR ucCRTC;                       // Which CRTC needs this YUV or RGB format
+  UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetMemoryClockTable
+/****************************************************************************/	
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulReturnMemoryClock;          // current memory speed in 10 kHz units
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetEngineClockTable
+/****************************************************************************/	
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulReturnEngineClock;          // current engine speed in 10 kHz units
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// Following Structures and constant may be obsolete
+/****************************************************************************/	
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//Read operation is successful when the parameter space is non-zero; otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
+  USHORT    usStatus;           //When used as output: lower byte EDID checksum, high byte hardware status
+                                //When used as input:  lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+  UCHAR     ucSlaveAddr;        //Read from which slave
+  UCHAR     ucLineNumber;       //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
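+
+// Illustrative sketch (an assumption, not part of the original interface):
+// filling the parameter block for a 128-byte EDID read.  0xA0 is the
+// conventional DDC slave address; the prescale value is board-specific and
+// passed through as a placeholder.
+static inline void
+atom_example_fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *p,
+                            USHORT prescale, UCHAR line)
+{
+  p->usPrescale    = prescale;  // ratio between engine clock and I2C clock
+  p->usVRAMAddress = 0;         // frame-buffer offset for the raw EDID
+  p->usStatus      = 128;       // on input: bytes to read (128 or 1)
+  p->ucSlaveAddr   = 0xA0;      // conventional DDC slave address
+  p->ucLineNumber  = line;      // HW-assisted I2C line to use
+}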
+
+
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
+#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usByteOffset;       //Write to which byte
+                                //Upper portion of usByteOffset is Format of data 
+                                //1bytePS+offsetPS
+                                //2bytesPS+offsetPS
+                                //blockID+offsetPS
+                                //blockID+offsetID
+                                //blockID+counterID+offsetID
+  UCHAR     ucData;             //PS data1
+  UCHAR     ucStatus;           //Status byte: 1=success, 2=failure; also used as PS data2
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
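+
+// A matching sketch for the write path (illustration only): a single-byte
+// write using the PSOFFSET_PSDATABYTE format.  Packing the format selector
+// into the upper byte of usByteOffset is an assumption drawn from the
+// comment above, not from vendor documentation.
+static inline void
+atom_example_fill_one_byte_write(WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS *p,
+                                 UCHAR offset, UCHAR data, UCHAR slave)
+{
+  p->usPrescale   = 0;          // board-specific; placeholder value
+  p->usByteOffset = (USHORT)((ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE << 8)
+                             | offset);
+  p->ucData       = data;       // PS data1
+  p->ucStatus     = 0;          // written back: 1=success, 2=failure
+  p->ucSlaveAddr  = slave;      // write to which slave
+  p->ucLineNumber = 0;          // HW-assisted line; placeholder
+}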
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/	
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/	
+typedef struct	_POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value; 0: detected, 1: not detected
+  UCHAR   ucPwrBehaviorId;
+  USHORT  usPwrBudget;                 //how much power the system currently boots to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{                               
+  UCHAR   ucPowerConnectorStatus;      //Used for return value; 0: detected, 1: not detected
+  UCHAR   ucReserved;
+  USHORT  usPwrBudget;                 //how much power the system currently boots to, in units of watts
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/	
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/	
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS
+{
+  USHORT  usSpreadSpectrumPercentage;       
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS_V2
+{
+  USHORT  usSpreadSpectrumPercentage;       
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct	_ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           // Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPpll;                         // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
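+
+// Illustrative helper (not part of the original header): packing the
+// feedback-divider and fractional-slip amounts into usSpreadSpectrumAmount
+// using the v2 masks and shifts above.
+static inline USHORT
+atom_example_pack_ss_amount_v2(USHORT fbdiv, USHORT nfrac)
+{
+  return (USHORT)(((fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+                   ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+                  ((nfrac << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+                   ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
+}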
+
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+    
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL            ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+  PIXEL_CLOCK_PARAMETERS sPCLKInput;
+  ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion 
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct	_MEMORY_TRAINING_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/	
+// Structures used by LVDSEncoderControlTable   (Before DCE30)
+//                    LVTMAEncoderControlTable  (Before DCE30)
+//                    TMDSAEncoderControlTable  (Before DCE30)
+/****************************************************************************/	
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // bit0=0: Enable single link
+                        //     =1: Enable dual link
+                        // Bit1=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+  UCHAR  ucTruncate;    // bit0=0: Disable truncate
+                        //     =1: Enable truncate
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucSpatial;     // bit0=0: Disable spatial dithering
+                        //     =1: Enable spatial dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucTemporal;    // bit0=0: Disable temporal dithering
+                        //     =1: Enable temporal dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+                        // bit5=0: Gray level 2
+                        //     =1: Gray level 4
+  UCHAR  ucFRC;         // bit4=0: 25FRC_SEL pattern E
+                        //     =1: 25FRC_SEL pattern F
+                        // bit6:5=0: 50FRC_SEL pattern A
+                        //       =1: 50FRC_SEL pattern B
+                        //       =2: 50FRC_SEL pattern C
+                        //       =3: 50FRC_SEL pattern D
+                        // bit7=0: 75FRC_SEL pattern E
+                        //     =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+  
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{                               
+  UCHAR    ucEnable;            // Enable or Disable External TMDS encoder
+  UCHAR    ucMisc;              // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+  UCHAR    ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+  DIG_ENCODER_CONTROL_PARAMETERS            sDigEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/	
+//ucTableFormatRevision=1,ucTableContentRevision=3
+
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL            0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED           0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED           0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL          0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT            0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT          0x04
+#define DVO_ENCODER_CONFIG_24BIT               0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock; 
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;                          //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3	DVO_ENCODER_CONTROL_PARAMETERS_V3
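+
+// Usage sketch (illustration only): a ucDVOConfig value for a DDR-rate
+// encoder driving the upper 12 bits, composed from the flags above.
+static inline UCHAR atom_example_dvo_config(void)
+{
+  return DVO_ENCODER_CONFIG_DDR_SPEED |     // rate select: DDR
+         DVO_ENCODER_CONFIG_UPPER12BIT;     // output select: upper 12 bits
+}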
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: the structure is unchanged, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+//     =1: coherent mode
+
+//==========================================================================================
+//Only change is here next time when changing encoder parameter definitions again!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL                0x01
+#define PANEL_ENCODER_MISC_COHERENT            0x02
+#define PANEL_ENCODER_MISC_TMDS_LINKB          0x04
+#define PANEL_ENCODER_MISC_HDMI_TYPE           0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE           ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE            ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ       (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN              0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH           0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN        0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH     0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN       0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH    0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4         0x20
+#define PANEL_ENCODER_25FRC_MASK               0x10
+#define PANEL_ENCODER_25FRC_E                  0x00
+#define PANEL_ENCODER_25FRC_F                  0x10
+#define PANEL_ENCODER_50FRC_MASK               0x60
+#define PANEL_ENCODER_50FRC_A                  0x00
+#define PANEL_ENCODER_50FRC_B                  0x20
+#define PANEL_ENCODER_50FRC_C                  0x40
+#define PANEL_ENCODER_50FRC_D                  0x60
+#define PANEL_ENCODER_75FRC_MASK               0x80
+#define PANEL_ENCODER_75FRC_E                  0x00
+#define PANEL_ENCODER_75FRC_F                  0x80
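+
+// Illustrative composition (an assumption, not vendor-documented usage): a
+// ucTemporal value enabling temporal dithering at 888RGB depth with gray
+// level 4, built from the masks above.
+static inline UCHAR atom_example_temporal_dither(void)
+{
+  return PANEL_ENCODER_TEMPORAL_DITHER_EN |     // bit0: enable
+         PANEL_ENCODER_TEMPORAL_DITHER_DEPTH |  // bit4: 888RGB
+         PANEL_ENCODER_TEMPORAL_LEVEL_4;        // bit5: gray level 4
+}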
+
+/****************************************************************************/	
+// Structures used by SetVoltageTable
+/****************************************************************************/	
+#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
+#define SET_VOLTAGE_INIT_MODE                  5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE            6          //Get the max voltage for the soldered ASIC
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4
+
+#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2
+
+typedef struct	_SET_VOLTAGE_PARAMETERS
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // To set all, to set source A or source B or ...
+  UCHAR    ucVoltageIndex;              // An index to tell which voltage level
+  UCHAR    ucReserved;          
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct	_SET_VOLTAGE_PARAMETERS_V2
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // Not used; may be used by a state machine for different power modes
+  USHORT   usVoltageLevel;              // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+
+typedef struct	_SET_VOLTAGE_PARAMETERS_V1_3
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Indicate action: Set voltage level
+  USHORT   usVoltageLevel;              // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC                    1
+#define VOLTAGE_TYPE_MVDDC                   2
+#define VOLTAGE_TYPE_MVDDQ                   3
+#define VOLTAGE_TYPE_VDDCI                   4
+
+//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
+#define ATOM_SET_VOLTAGE                     0        //Set voltage level
+#define ATOM_INIT_VOLTAGE_REGULATOR          3        //Init regulator
+#define ATOM_SET_VOLTAGE_PHASE               4        //Set voltage-regulator phase
+#define ATOM_GET_MAX_VOLTAGE                 6        //Get max voltage; not used in SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL               6        //Get voltage level from virtual voltage ID
+
+// virtual voltage IDs used in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1             0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2             0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3             0xff04
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+  SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
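+
+// Sketch (illustrative): programming a VDDC level with the v1.3 parameter
+// layout.  Per the comments above, usVoltageLevel is in mV, or one of the
+// ATOM_VIRTUAL_VOLTAGE_IDx values when a leakage-adjusted level is wanted.
+static inline void
+atom_example_set_vddc(SET_VOLTAGE_PARAMETERS_V1_3 *p, USHORT level_mv)
+{
+  p->ucVoltageType  = VOLTAGE_TYPE_VDDC;
+  p->ucVoltageMode  = ATOM_SET_VOLTAGE;  // action: set voltage level
+  p->usVoltageLevel = level_mv;          // mV, or ATOM_VIRTUAL_VOLTAGE_IDx
+}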
+
+// Newly added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id 
+  ULONG    ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct  _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  ULONG    ulVotlageGpioState;
+  ULONG    ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct  _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  USHORT   usVoltageLevel;
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define ATOM_GET_VOLTAGE_VID                0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
+// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+
+// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
+#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+// undefined power state
+#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
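+
+// Sketch (illustrative): querying the leakage VID of the performance state
+// with the GetVoltageInfo v1.1 input layout.  Passing the 0xff01 virtual ID
+// here follows the state mapping comments above and is an assumption.
+static inline void
+atom_example_query_leakage_vid(GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 *p)
+{
+  p->ucVoltageType  = VOLTAGE_TYPE_VDDC;
+  p->ucVoltageMode  = ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID;
+  p->usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;  // 0xff01 leakage ID
+  p->ulReserved     = 0;
+}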
+
+/****************************************************************************/	
+// Structures used by TVEncoderControlTable
+/****************************************************************************/	
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10KHz; for BIOS convenience
+  UCHAR  ucTvStandard;                // See definition "ATOM_TV_NTSC ..."
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+  TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;          
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+/****************************************************************************/	
+// Structure used in Data.mtb
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+  USHORT        UtilityPipeLine;          // Offset for the utility to get parser info; don't change this position!
+  USHORT        MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1; not configurable from BIOS, need to include the table to build BIOS
+  USHORT        MultimediaConfigInfo;     // Only used by MM Lib, latest version 2.1; not configurable from BIOS, need to include the table to build BIOS
+  USHORT        StandardVESA_Timing;      // Only used by Bios
+  USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
+  USHORT        PaletteData;              // Only used by BIOS
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
+  USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
+  USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
+  USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
+  USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600           
+  USHORT        VRAM_UsageByFirmware;     // Shared by various SW components,latest version 1.3 will be used from R600
+  USHORT        GPIO_Pin_LUT;             // Shared by various SW components,latest version 1.1
+  USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
+  USHORT        ComponentVideoInfo;       // Shared by various SW components,latest version 2.1 will be used from R600
+  USHORT        PowerPlayInfo;            // Shared by various SW components,latest version 2.1,new design from R600
+  USHORT        CompassionateData;        // Will be obsolete from R600
+  USHORT        SaveRestoreInfo;          // Only used by Bios
+  USHORT        PPLL_SS_Info;             // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+  USHORT        OemInfo;                  // Defined and used by external SW, should be obsolete soon
+  USHORT        XTMDS_Info;               // Will be obsolete from R600
+  USHORT        MclkSS_Info;              // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+  USHORT        Object_Header;            // Shared by various SW components,latest version 1.1
+  USHORT        IndirectIOAccess;         // Only used by Bios,this table position can't change at all!!
+  USHORT        MC_InitParameter;         // Only used by command table
+  USHORT        ASIC_VDDC_Info;           // Will be obsolete from R600
+  USHORT        ASIC_InternalSS_Info;     // New table name from R600, used to be called "ASIC_MVDDC_Info"
+  USHORT        TV_VideoMode;             // Only used by command table
+  USHORT        VRAM_Info;                // Only used by command table, latest version 1.3
+  USHORT        MemoryTrainingInfo;       // Used by VBIOS and Diag utility for memory training purposes since R600; the new table rev starts from 2.1
+  USHORT        IntegratedSystemInfo;     // Shared by various SW components
+  USHORT        ASIC_ProfilingInfo;       // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+  USHORT        VoltageObjectInfo;        // Shared by various SW components, latest version 1.1
+  USHORT        PowerSourceInfo;          // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
+// For backward compatibility
+#define LVDS_Info                LCD_Info
+#define DAC_Info                 PaletteData
+#define TMDS_Info                DIGTransmitterInfo
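+
+// Sketch (assumption): each USHORT entry in ATOM_MASTER_LIST_OF_DATA_TABLES
+// is treated here as a byte offset from the start of the BIOS image, with 0
+// meaning the table is absent.  That convention is an assumption for
+// illustration; this header does not state it.
+static inline void *
+atom_example_resolve_table(UCHAR *bios_base, USHORT table_offset)
+{
+  return (table_offset != 0) ? (void *)(bios_base + table_offset)
+                             : (void *)0;
+}
+// e.g. atom_example_resolve_table(bios, mdt->ListOfDataTables.FirmwareInfo)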
+
+/****************************************************************************/	
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ULONG                    ulSignature;      // HW info table signature string "$ATI"
+  UCHAR                    ucI2C_Type;       // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+  UCHAR                    ucTV_OutInfo;     // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+  UCHAR                    ucVideoPortInfo;  // Provides the video port capabilities
+  UCHAR                    ucHostPortInfo;   // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/	
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG                    ulSignature;      // MM info table signature string "$MMT"
+  UCHAR                    ucTunerInfo;      // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+  UCHAR                    ucAudioChipInfo;  // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+  UCHAR                    ucProductID;      // Defines as OEM ID or ATI board ID dependent on product type setting
+  UCHAR                    ucMiscInfo1;      // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+  UCHAR                    ucMiscInfo2;      // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+  UCHAR                    ucMiscInfo3;      // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+  UCHAR                    ucMiscInfo4;      // Video Decoder Host Config (2:0) reserved (7:3)
+  UCHAR                    ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/	
+// Structures used in FirmwareInfoTable
+/****************************************************************************/	
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; 
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; 
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; 
+// Others: Reserved
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008		// (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable. 
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010		// (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable. 
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008		// (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010		// (valid from v2.1 ): =1: engclk ss enable with external ss chip
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; it will be retired soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:1;
+  USHORT SCL2Redefined:1;
+  USHORT PostWithoutModeSet:1;
+  USHORT HyperMemory_Size:4;
+  USHORT HyperMemory_Support:1;
+  USHORT PPMode_Assigned:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT GPUControlsBL:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT DualCRTC_Support:1;
+  USHORT FirmwarePosted:1;
+#else
+  USHORT FirmwarePosted:1;
+  USHORT DualCRTC_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT GPUControlsBL:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT PPMode_Assigned:1;
+  USHORT HyperMemory_Support:1;
+  USHORT HyperMemory_Size:4;
+  USHORT PostWithoutModeSet:1;
+  USHORT SCL2Redefined:1;
+  USHORT Reserved:1;
+#endif
+}ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  ATOM_FIRMWARE_CAPABILITY sbfAccess;
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
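+
+// Usage sketch (illustrative): the access union lets the capability word be
+// read either through the bitfield or as a raw USHORT tested against the
+// ATOM_BIOS_INFO_* masks above (assuming the word has already been fixed up
+// to host byte order).
+static inline int
+atom_example_firmware_posted(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
+{
+  return (cap.susAccess & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) != 0;
+}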
+
+typedef struct _ATOM_FIRMWARE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucPadding[3];               //Don't use them
+  ULONG                           aulReservedForBIOS[3];      //Don't use them
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS[2];      //Don't use them
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS;         //Don't use them
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In mV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+//the structure below to be used from Cypress
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved1;
+  ULONG                           ulReserved2;
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit
+  UCHAR                           ucReserved1;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In mV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+//the structure below to be used from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved[2];
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In mV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  UCHAR                           ucRemoteDisplayConfig;
+  UCHAR                           ucReserved5[3];             //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucReserved9[3];
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usReserved12;
+  ULONG                           ulReserved10[3];            // New added comparing to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
+
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
+
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE                   0x00
+#define REMOTE_DISPLAY_ENABLE                    0x01
+
+/****************************************************************************/	
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/	
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
+#define IGP_CAP_FLAG_AC_CARD               0x4
+#define IGP_CAP_FLAG_SDVO_CARD             0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG	                          ulBootUpEngineClock;		    //in 10kHz unit
+  ULONG	                          ulBootUpMemoryClock;		    //in 10kHz unit
+  ULONG	                          ulMaxSystemMemoryClock;	    //in 10kHz unit
+  ULONG	                          ulMinSystemMemoryClock;	    //in 10kHz unit
+  UCHAR                           ucNumberOfCyclesInPeriodHi;
+  UCHAR                           ucLCDTimingSel;             //=0: not valid; !=0: select this timing descriptor from LCD EDID.
+  USHORT                          usReserved1;
+  USHORT                          usInterNBVoltageLow;        //An intermediate PWM value to set the voltage
+  USHORT                          usInterNBVoltageHigh;       //Another intermediate PWM value to set the voltage
+  ULONG	                          ulReserved[2];
+
+  USHORT                          usFSBClock;                 //In MHz unit
+  USHORT                          usCapabilityFlag;           //Bit0=1 indicates the fake HDMI support, Bit1=0/1 for Dynamic clocking dis/enable
+                                                              //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+                                                              //Bit[4]==1: P/2 mode, ==0: P/1 mode
+  USHORT                          usPCIENBCfgReg7;            //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+  USHORT                          usK8MemoryClock;            //in MHz unit
+  USHORT                          usK8SyncStartDelay;         //in 0.01 us unit
+  USHORT                          usK8DataReturnTime;         //in 0.01 us unit
+  UCHAR                           ucMaxNBVoltage;
+  UCHAR                           ucMinNBVoltage;
+  UCHAR                           ucMemoryType;               //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+  UCHAR                           ucNumberOfCyclesInPeriod;   //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+  UCHAR                           ucStartingPWM_HighTime;     //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+  UCHAR                           ucHTLinkWidth;              //16 bit vs. 8 bit
+  UCHAR                           ucMaxNBVoltageHigh;    
+  UCHAR                           ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock:    For Intel IGP,it's the UMA system memory clock 
+                        For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 
+                        For AMD IGP,for now this can be 0
+
+usFSBClock:             For Intel IGP,it's FSB Freq 
+                        For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. 
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 
+
+ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >= Min NB voltage but <= InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >= InterNBVoltageLow but <= Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
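+
+// Sketch (illustrative): combining the split PWM period fields per the
+// explanation above.  Bit 7 of the high byte flags an inverter and is masked
+// out of the numeric period; this follows the comment, not vendor docs.
+static inline USHORT
+atom_example_pwm_period(const ATOM_INTEGRATED_SYSTEM_INFO *info)
+{
+  return (USHORT)(((info->ucNumberOfCyclesInPeriodHi & 0x7F) << 8) |
+                  info->ucNumberOfCyclesInPeriod);
+}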
+
+
+/*
+The following IGP table is introduced from RS780; SBIOS is supposed to place it in the FB before the IGP VBIOS starts VPOST.
+The VBIOS will then copy the whole structure into its image, so all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved that the table revision should never need to change; whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before.
+*/
+
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG                      ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulReserved1[2];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulBootUpSidePortClock;     //in 10kHz unit
+  ULONG                      ulMinSidePortClock;        //in 10kHz unit
+  ULONG                      ulReserved2[6];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //see explanation below
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulDDISlot1Config;
+  ULONG                      ulDDISlot2Config;
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  UCHAR                      ucDockingPinBit;
+  UCHAR                      ucDockingPinPolarity;
+  ULONG                      ulDockingPinCFGInfo;
+  ULONG                      ulCPUCapInfo;
+  USHORT                     usNumberOfCyclesInPeriod;
+  USHORT                     usMaxNBVoltage;
+  USHORT                     usMinNBVoltage;
+  USHORT                     usBootUpNBVoltage;
+  ULONG                      ulHTLinkFreq;              //in 10Khz
+  USHORT                     usMinHTLinkWidth;
+  USHORT                     usMaxHTLinkWidth;
+  USHORT                     usUMASyncStartDelay;
+  USHORT                     usUMADataReturnTime;
+  USHORT                     usLinkStatusZeroTime;
+  USHORT                     usDACEfuse;              //for storing bandgap value (for RS880 only)
+  ULONG                      ulHighVoltageHTLinkFreq;     // in 10Khz
+  ULONG                      ulLowVoltageHTLinkFreq;      // in 10Khz
+  USHORT                     usMaxUpStreamHTLinkWidth;
+  USHORT                     usMaxDownStreamHTLinkWidth;
+  USHORT                     usMinUpStreamHTLinkWidth;
+  USHORT                     usMinDownStreamHTLinkWidth;
+  USHORT                     usFirmwareVersion;         //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+  USHORT                     usFullT0Time;             // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+  ULONG                      ulReserved3[96];          //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;   
+
+/*
+ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:  
+Bit[0]=1: PowerExpress mode; =0: Non-PowerExpress mode;
+Bit[1]=1: system boots up at AMD overdrive state or a user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay states are used.
+      =0: system boots up at driver control state. Power state depends on the PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state (Performance) will be supported.
+      =0: Multiple power states supported from the PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on the current system.
+      =0: CLMC is not supported or enabled on the current system. SBIOS needs to support HT link/freq change through the ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while min HT width is determined by the display requirement.
+      =0: CDLW is disabled. If CLMC is enabled, min HT width will be set equal to max HT width. If CLMC is disabled, max HT width will be applied.
+Bit[6]=1: High voltage requested for all power states. In this case, voltage will be forced at 1.1v and PowerPlay table voltage drop/throttling requests will be ignored.
+      =0: Voltage settings are determined by the PowerPlay table.
+Bit[7]=1: Enable CLMC as hybrid mode. CDLD and CILR will be disabled in this case and legacy C1E is used. This is a workaround for a CPU (Griffin) performance issue.
+      =0: Enable CLMC as regular mode; CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on the current system.
+      =0: CDLF is not supported or enabled on the current system.
+Bit[9]=1: DLL Shut Down feature is enabled on the current system.
+      =0: DLL Shut Down feature is not enabled or supported on the current system.
+
+ulBootUpReqDisplayVector: This dword is a bit vector indicating what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8] - Bootup LCD expansion selection; 0-center, 1-full panel size expansion;
+                    [7:0]  - Bootup TV standard selection; this is a bit vector to indicate what TV standards are supported by the system. Refer to the ucTVSupportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+      [3:0]  - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1: lane 3:0; bit 1=1: lane 7:4; bit 2=1: lane 11:8; bit 3=1: lane 15:12)
+      [7:4]  - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1: lane 3:0; bit 5=1: lane 7:4; bit 6=1: lane 11:8; bit 7=1: lane 15:12)
+      When a DDI connector is not "paired" (meaning the two connections are mutually exclusive between chassis and docking, i.e. only one of them can be connected at a time)
+      in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
+      if one DDI connector is only populated in docking with PCIE lanes 8-11 but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+      [15:8] - Lane configuration attribute;
+      [23:16]- Connector type, possible values:
+               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+               CONNECTOR_OBJECT_ID_DISPLAYPORT
+               CONNECTOR_OBJECT_ID_eDP
+      [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type; set it to 0x0 when SidePort memory is not installed. The driver needs this info to change the SidePort memory clock. Not for display in CCC.
+For IGP, HyperMemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber:  number of channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 
+ucDockingPinBit:     which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod: Indicates how many cycles are in one period when PWM duty is 100%.
+
+usMaxNBVoltage: Max. voltage control value in either PWM or GPIO mode.
+usMinNBVoltage: Min. voltage control value in either PWM or GPIO mode.
+                    GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value; ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+                    PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value; ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+                    GPU SW doesn't control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is don't-care.
+
+usBootUpNBVoltage: Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq:       Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth:   Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth:   Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.  
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation 
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation; set this to 0x0 for a K8 CPU, and set a proper value in units of 0.01us
+for Griffin or Greyhound. SBIOS needs to convert T0Ttime to actual time as follows (a C sketch follows this comment block):
+                     if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+                     if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+                     if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+                     if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq:     HT link frequency for the power state with high voltage. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHTLinkFreq(bootup frequency). 
+ulLowVoltageHTLinkFreq:      HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth:  same as above.
+usMinUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth:  same as above.
+*/
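+
+/* Illustrative sketch (not part of the original header): decoding the
+   T0Ttime byte described above into usLinkStatusZeroTime, kept in units
+   of 0.01us. The helper name and parameter are hypothetical. */
+static inline USHORT atom_t0ttime_to_link_status_zero_time(UCHAR t0t)
+{
+  UCHAR count = t0t & 0x0F;                     /* T0Ttime[3:0] */
+  switch ((t0t >> 4) & 0x03) {                  /* T0Ttime[5:4] granularity */
+  case 0:  return count * 10;                   /* 0.1us steps = 10 x 0.01us */
+  case 1:  return count * 50;                   /* 0.5us steps */
+  case 2:  return count * 200;                  /* 2.0us steps */
+  default: return (count <= 0xa) ? count * 2000 : 0; /* 20us steps, 0x0..0xa only */
+  }
+}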
+
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI         5
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    // this define reflects the max defined CPU code
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE                  0x00000004 
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY         0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED                        0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED                        0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED              0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED            0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED                        0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED                0x00000200
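+
+/* Illustrative sketch (not part of the original header): testing
+   ulSystemConfig flags from ATOM_INTEGRATED_SYSTEM_INFO_V2 with the
+   defines above. The helper name is hypothetical. */
+static inline int atom_igp_uses_pwm_voltage(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
+{
+  /* Bit[2]=1: PWM method on NB voltage control; =0: GPIO method */
+  return (info->ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
+}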
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK                     0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK                      0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK              0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3                    0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7                    0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11                   0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15                  0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK                       0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED                      0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED                    0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK                  0x00FF0000
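+
+/* Illustrative sketch (not part of the original header): unpacking a
+   ulDDISlot*Config dword with the masks above. Helper names are
+   hypothetical. */
+static inline UCHAR atom_ddi_slot_chassis_lane_map(ULONG ulDDISlotConfig)
+{
+  return (UCHAR)(ulDDISlotConfig & b0IGP_DDI_SLOT_LANE_MAP_MASK);             /* [3:0] */
+}
+static inline UCHAR atom_ddi_slot_connector_type(ULONG ulDDISlotConfig)
+{
+  return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16); /* [23:16] */
+}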
+
+// The next revision of IntegratedSystemInfoTable after V2 is V5, because the real revision of V2 is 1.4. This revision is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG	                     ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulDentistVCOFreq;          //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK. 
+  ULONG                      ulLClockFreq;              //GPU Lclk freq in 10kHz unit, related to NCLK in the NorthBridge
+  ULONG	                     ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulReserved1[8];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulReserved2[4];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //TBD
+  ULONG                      ulCPUCapInfo;              //TBD
+  USHORT                     usMaxNBVoltage;            //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usMinNBVoltage;            //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usBootUpNBVoltage;         //boot up NB voltage
+  UCHAR                      ucHtcTmpLmt;               //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+  UCHAR                      ucTjOffset;                //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+  ULONG                      ulReserved3[4];            //must be 0x0 for the reserved
+  ULONG                      ulDDISlot1Config;          //see above ulDDISlot1Config definition
+  ULONG                      ulDDISlot2Config;
+  ULONG                      ulDDISlot3Config;
+  ULONG                      ulDDISlot4Config;
+  ULONG                      ulReserved4[4];            //must be 0x0 for the reserved
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  USHORT                     usReserved;
+  ULONG                      ulReserved5[4];            //must be 0x0 for the reserved
+  ULONG                      ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+  ULONG                      ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+  ULONG                      ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+  ULONG                      ulReserved6[61];           //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;   
+
+#define ATOM_CRT_INT_ENCODER1_INDEX                       0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX                       0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX                        0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX                       0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX                       0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX                       0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX                        0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX                       0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX                        0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX                       0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX                       0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX                        0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX                       0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX                       0x0000000D
+
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID                          0x00
+#define ASIC_INT_TV_ENCODER_ID                            0x02
+#define ASIC_INT_DIG1_ENCODER_ID                          0x03
+#define ASIC_INT_DAC2_ENCODER_ID                          0x04
+#define ASIC_EXT_TV_ENCODER_ID                            0x06
+#define ASIC_INT_DVO_ENCODER_ID                           0x07
+#define ASIC_INT_DIG2_ENCODER_ID                          0x09
+#define ASIC_EXT_DIG_ENCODER_ID                           0x05
+#define ASIC_EXT_DIG2_ENCODER_ID                          0x08
+#define ASIC_INT_DIG3_ENCODER_ID                          0x0a
+#define ASIC_INT_DIG4_ENCODER_ID                          0x0b
+#define ASIC_INT_DIG5_ENCODER_ID                          0x0c
+#define ASIC_INT_DIG6_ENCODER_ID                          0x0d
+#define ASIC_INT_DIG7_ENCODER_ID                          0x0e
+
+//define Encoder attribute
+#define ATOM_ANALOG_ENCODER                               0
+#define ATOM_DIGITAL_ENCODER                              1
+#define ATOM_DP_ENCODER                                   2
+
+#define ATOM_ENCODER_ENUM_MASK                            0x70
+#define ATOM_ENCODER_ENUM_ID1                             0x00
+#define ATOM_ENCODER_ENUM_ID2                             0x10
+#define ATOM_ENCODER_ENUM_ID3                             0x20
+#define ATOM_ENCODER_ENUM_ID4                             0x30
+#define ATOM_ENCODER_ENUM_ID5                             0x40 
+#define ATOM_ENCODER_ENUM_ID6                             0x50
+
+#define ATOM_DEVICE_CRT1_INDEX                            0x00000000
+#define ATOM_DEVICE_LCD1_INDEX                            0x00000001
+#define ATOM_DEVICE_TV1_INDEX                             0x00000002
+#define ATOM_DEVICE_DFP1_INDEX                            0x00000003
+#define ATOM_DEVICE_CRT2_INDEX                            0x00000004
+#define ATOM_DEVICE_LCD2_INDEX                            0x00000005
+#define ATOM_DEVICE_DFP6_INDEX                            0x00000006
+#define ATOM_DEVICE_DFP2_INDEX                            0x00000007
+#define ATOM_DEVICE_CV_INDEX                              0x00000008
+#define ATOM_DEVICE_DFP3_INDEX                            0x00000009
+#define ATOM_DEVICE_DFP4_INDEX                            0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX                            0x0000000B
+
+#define ATOM_DEVICE_RESERVEDC_INDEX                       0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX                       0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX                       0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX                       0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO                    (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2                  ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3                  (ATOM_DEVICE_DFP5_INDEX + 1 )
+
+#define ATOM_MAX_SUPPORTED_DEVICE                         (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT                          (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT                          (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT                           (0x1L << ATOM_DEVICE_TV1_INDEX  )
+#define ATOM_DEVICE_DFP1_SUPPORT                          (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT                          (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT                          (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT                          (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT                          (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT                            (0x1L << ATOM_DEVICE_CV_INDEX   )
+#define ATOM_DEVICE_DFP3_SUPPORT                          (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT                          (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT                          (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT                           (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT                           (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT |  ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT                            (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT                           (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK                   0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT                  0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA                         0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I                       0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D                       0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A                       0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO                      0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE                   0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS                        0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK                   0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART                       0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A                 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B                 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1                      0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT                 0x0000000F
+
+
+#define ATOM_DEVICE_DAC_INFO_MASK                         0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA                         0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB                         0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC                        0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C                          0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK                      0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT                     0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK                           0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT                          0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE              0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE                  0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE                0x00000003    //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL                 0x00000004    //For IGP RS690
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK                 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT                0x00000007
+#define	ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C            0x00000000
+#define	ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C            0x00000001
+
+//  usDeviceSupport:
+//  Bit 0  = 0 - no CRT1 support; = 1 - CRT1 is supported
+//  Bit 1  = 0 - no LCD1 support; = 1 - LCD1 is supported
+//  Bit 2  = 0 - no TV1  support; = 1 - TV1  is supported
+//  Bit 3  = 0 - no DFP1 support; = 1 - DFP1 is supported
+//  Bit 4  = 0 - no CRT2 support; = 1 - CRT2 is supported
+//  Bit 5  = 0 - no LCD2 support; = 1 - LCD2 is supported
+//  Bit 6  = 0 - no DFP6 support; = 1 - DFP6 is supported
+//  Bit 7  = 0 - no DFP2 support; = 1 - DFP2 is supported
+//  Bit 8  = 0 - no CV   support; = 1 - CV   is supported
+//  Bit 9  = 0 - no DFP3 support; = 1 - DFP3 is supported
+//  Bit 10 = 0 - no DFP4 support; = 1 - DFP4 is supported
+//  Bit 11 = 0 - no DFP5 support; = 1 - DFP5 is supported
+//  (a test helper is sketched below)
+
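+/* Illustrative sketch (not part of the original header): checking a
+   usDeviceSupport bit vector against the ATOM_DEVICE_xxx_SUPPORT masks
+   defined earlier. The helper name is hypothetical. */
+static inline int atom_device_supported(USHORT usDeviceSupport, ULONG ulDeviceMask)
+{
+  return (usDeviceSupport & ulDeviceMask) != 0; /* e.g. ulDeviceMask = ATOM_DEVICE_LCD1_SUPPORT */
+}
+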
+/****************************************************************************/
+/* Structure used in MclkSS_InfoTable                                       */
+/****************************************************************************/
+//    ucI2C_ConfigID
+//    [7:0] - I2C LINE Associate ID
+//          = 0 - no I2C
+//    [7]   - HW_Cap       = 1, [6:0]=HW assisted I2C ID (HW line selection)
+//                         = 0, [6:0]=SW assisted I2C ID
+//    [6:4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+//                         = 2, HW engine for Multimedia use
+//                         = 3-7, Reserved for future I2C engines
+//    [3:0] - I2C_LINE_MUX = a mux number when it's HW assisted I2C, or a GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfHW_Capable:1;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfI2C_LineMux:4;
+#else
+  UCHAR   bfI2C_LineMux:4;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+  ATOM_I2C_ID_CONFIG sbfAccess;
+  UCHAR              ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
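+
+/* Illustrative sketch (not part of the original header): decoding a raw
+   ucI2C_ConfigID byte through the access union above. The helper name is
+   hypothetical. */
+static inline void atom_decode_i2c_id(UCHAR ucRaw, UCHAR *pucHwCap, UCHAR *pucEngine, UCHAR *pucMux)
+{
+  ATOM_I2C_ID_CONFIG_ACCESS sId;
+  sId.ucAccess = ucRaw;
+  *pucHwCap  = sId.sbfAccess.bfHW_Capable;   /* [7]   HW vs SW assisted */
+  *pucEngine = sId.sbfAccess.bfHW_EngineID;  /* [6:4] HW engine ID */
+  *pucMux    = sId.sbfAccess.bfI2C_LineMux;  /* [3:0] mux or GPIO ID */
+}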
+   
+
+/****************************************************************************/	
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+  USHORT                    usClkMaskRegisterIndex;
+  USHORT                    usClkEnRegisterIndex;
+  USHORT                    usClkY_RegisterIndex;
+  USHORT                    usClkA_RegisterIndex;
+  USHORT                    usDataMaskRegisterIndex;
+  USHORT                    usDataEnRegisterIndex;
+  USHORT                    usDataY_RegisterIndex;
+  USHORT                    usDataA_RegisterIndex;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+  UCHAR                     ucClkMaskShift;
+  UCHAR                     ucClkEnShift;
+  UCHAR                     ucClkY_Shift;
+  UCHAR                     ucClkA_Shift;
+  UCHAR                     ucDataMaskShift;
+  UCHAR                     ucDataEnShift;
+  UCHAR                     ucDataY_Shift;
+  UCHAR                     ucDataA_Shift;
+  UCHAR                     ucReserved1;
+  UCHAR                     ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_GPIO_I2C_ASSIGMENT   asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/	
+// Common Structure used in other structures
+/****************************************************************************/	
+
+#ifndef _H2INC
+  
+//Please don't add to or expand this bitfield structure below; this one will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{ 
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:6;
+  USHORT RGB888:1;
+  USHORT DoubleClock:1;
+  USHORT Interlace:1;
+  USHORT CompositeSync:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT VerticalCutOff:1;
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HorizontalCutOff:1;
+#else
+  USHORT HorizontalCutOff:1;
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VerticalCutOff:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT CompositeSync:1;
+  USHORT Interlace:1;
+  USHORT DoubleClock:1;
+  USHORT RGB888:1;
+  USHORT Reserved:6;           
+#endif
+}ATOM_MODE_MISC_INFO;
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  ATOM_MODE_MISC_INFO sbfAccess;
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+  
+#else
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+   
+#endif
+
+// usModeMiscInfo-
+#define ATOM_H_CUTOFF           0x01
+#define ATOM_HSYNC_POLARITY     0x02             //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY     0x04             //0=Active High, 1=Active Low
+#define ATOM_V_CUTOFF           0x08
+#define ATOM_H_REPLICATIONBY2   0x10
+#define ATOM_V_REPLICATIONBY2   0x20
+#define ATOM_COMPOSITESYNC      0x40
+#define ATOM_INTERLACE          0x80
+#define ATOM_DOUBLE_CLOCK_MODE  0x100
+#define ATOM_RGB888_MODE        0x200
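+
+/* Illustrative sketch (not part of the original header): testing a
+   susModeMiscInfo field against the usModeMiscInfo flags above. The helper
+   name is hypothetical. */
+static inline int atom_mode_is_interlaced(ATOM_MODE_MISC_INFO_ACCESS susInfo)
+{
+  return (susInfo.usAccess & ATOM_INTERLACE) != 0;
+}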
+
+//usRefreshRate-
+#define ATOM_REFRESH_43         43
+#define ATOM_REFRESH_47         47
+#define ATOM_REFRESH_56         56	
+#define ATOM_REFRESH_60         60
+#define ATOM_REFRESH_65         65
+#define ATOM_REFRESH_70         70
+#define ATOM_REFRESH_72         72
+#define ATOM_REFRESH_75         75
+#define ATOM_REFRESH_85         85
+
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// For translation from EDID to ATOM_MODE_TIMING, use the following formulas
+// (applied in the C sketch after the ATOM_MODE_TIMING struct below).
+//
+// VESA_HTOTAL      = VESA_ACTIVE + 2*VESA_BORDER + VESA_BLANK
+//                  = EDID_HA + EDID_HBL
+// VESA_HDISP       = VESA_ACTIVE = EDID_HA
+// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+//                  = EDID_HA + EDID_HSO
+// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+// VESA_BORDER      = EDID_BORDER
+
+/****************************************************************************/	
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+  USHORT  usH_Size;
+  USHORT  usH_Blanking_Time;
+  USHORT  usV_Size;
+  USHORT  usV_Blanking_Time;			
+  USHORT  usH_SyncOffset;
+  USHORT  usH_SyncWidth;
+  USHORT  usV_SyncOffset;
+  USHORT  usV_SyncWidth;
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;  
+  UCHAR   ucH_Border;         // From DFP EDID
+  UCHAR   ucV_Border;
+  UCHAR   ucCRTC;             // ATOM_CRTC1 or ATOM_CRTC2  
+  UCHAR   ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/	
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+  USHORT                      usH_Total;        // horizontal total
+  USHORT                      usH_Disp;         // horizontal display
+  USHORT                      usH_SyncStart;    // horizontal Sync start
+  USHORT                      usH_SyncWidth;    // horizontal Sync width
+  USHORT                      usV_Total;        // vertical total
+  USHORT                      usV_Disp;         // vertical display
+  USHORT                      usV_SyncStart;    // vertical Sync start
+  USHORT                      usV_SyncWidth;    // vertical Sync width
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  UCHAR                       ucCRTC;           // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR                       ucOverscanRight;  // right
+  UCHAR                       ucOverscanLeft;   // left
+  UCHAR                       ucOverscanBottom; // bottom
+  UCHAR                       ucOverscanTop;    // top
+  UCHAR                       ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used in StandardVESA_TimingTable
+//                   AnalogTV_InfoTable 
+//                   ComponentVideoInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MODE_TIMING
+{
+  USHORT  usCRTC_H_Total;
+  USHORT  usCRTC_H_Disp;
+  USHORT  usCRTC_H_SyncStart;
+  USHORT  usCRTC_H_SyncWidth;
+  USHORT  usCRTC_V_Total;
+  USHORT  usCRTC_V_Disp;
+  USHORT  usCRTC_V_SyncStart;
+  USHORT  usCRTC_V_SyncWidth;
+  USHORT  usPixelClock;					                 //in 10Khz unit
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  USHORT  usCRTC_OverscanRight;
+  USHORT  usCRTC_OverscanLeft;
+  USHORT  usCRTC_OverscanBottom;
+  USHORT  usCRTC_OverscanTop;
+  USHORT  usReserve;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_MODE_TIMING;
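+
+/* Illustrative sketch (not part of the original header): applying the
+   EDID -> VESA translation formulas above to fill the horizontal side of
+   an ATOM_MODE_TIMING. Parameter names (edid_ha, edid_hbl, ...) are
+   hypothetical inputs taken from an EDID detailed timing descriptor. */
+static inline void atom_htimings_from_edid(USHORT edid_ha, USHORT edid_hbl,
+                                           USHORT edid_hso, USHORT edid_hspw,
+                                           ATOM_MODE_TIMING *psTiming)
+{
+  psTiming->usCRTC_H_Total     = edid_ha + edid_hbl;  /* VESA_HTOTAL */
+  psTiming->usCRTC_H_Disp      = edid_ha;             /* VESA_HDISP */
+  psTiming->usCRTC_H_SyncStart = edid_ha + edid_hso;  /* VESA_HSYNC_START */
+  psTiming->usCRTC_H_SyncWidth = edid_hspw;           /* VESA_HSYNC_WIDTH */
+}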
+
+typedef struct _ATOM_DTD_FORMAT
+{
+  USHORT  usPixClk;
+  USHORT  usHActive;
+  USHORT  usHBlanking_Time;
+  USHORT  usVActive;
+  USHORT  usVBlanking_Time;			
+  USHORT  usHSyncOffset;
+  USHORT  usHSyncWidth;
+  USHORT  usVSyncOffset;
+  USHORT  usVSyncWidth;
+  USHORT  usImageHSize;
+  USHORT  usImageVSize;
+  UCHAR   ucHBorder;
+  UCHAR   ucVBorder;
+  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/	
+// Structure used in LVDS_InfoTable 
+//  * Need a document to describe this table
+/****************************************************************************/	
+#define SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usModePatchTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap; 
+  UCHAR               ucPanelInfoSize;           //  start from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
+  UCHAR               ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read the EDID from the LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL.
+#define LCDPANEL_CAP_READ_EDID                  0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates at usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define LCDPANEL_CAP_DRR_SUPPORTED              0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define	LCDPANEL_CAP_eDP                        0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+//    0 0 0 -  Color bit depth is undefined
+//    0 0 1 -  6 Bits per Primary Color
+//    0 1 0 -  8 Bits per Primary Color
+//    0 1 1 - 10 Bits per Primary Color
+//    1 0 0 - 12 Bits per Primary Color
+//    1 0 1 - 14 Bits per Primary Color
+//    1 1 0 - 16 Bits per Primary Color
+//    1 1 1 - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK    0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}   
+#define PANEL_RANDOM_DITHER   0x80
+#define PANEL_RANDOM_DITHER_MASK   0x80
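+
+/* Illustrative sketch (not part of the original header): pulling the EDID
+   V1.4 color bit depth code out of a misc byte with the mask above and
+   mapping it to bits per primary color (0 = undefined/reserved). The
+   helper name is hypothetical. */
+static inline UCHAR atom_panel_bits_per_color(UCHAR ucMisc)
+{
+  UCHAR ucCode = (ucMisc & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;          /* bits [6:4] */
+  return (ucCode >= 1 && ucCode <= 6) ? (UCHAR)(4 + 2 * ucCode) : 0;  /* 1->6bpc ... 6->16bpc */
+}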
+
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
+
+/****************************************************************************/	
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13 
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved 
+  UCHAR               ucPanelInfoSize;					 //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  UCHAR               ucDPCD_eDP_CONFIGURATION_CAP;     // dpcd 0dh
+  UCHAR               ucDPCD_MAX_LINK_RATE;             // dpcd 01h
+  UCHAR               ucDPCD_MAX_LANE_COUNT;            // dpcd 02h
+  UCHAR               ucDPCD_MAX_DOWNSPREAD;            // dpcd 03h
+
+  USHORT              usMaxPclkFreqInSingleLink;        // Max PixelClock frequency in single link mode. 
+  UCHAR               uceDPToLVDSRxId;
+  UCHAR               ucLcdReservd;
+  ULONG               ulReserved[2];
+}ATOM_LCD_INFO_V13;  
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+//    0 0 0 -  Color bit depth is undefined
+//    0 0 1 -  6 Bits per Primary Color
+//    0 1 0 -  8 Bits per Primary Color
+//    0 1 1 - 10 Bits per Primary Color
+//    1 0 0 - 12 Bits per Primary Color
+//    1 0 1 - 14 Bits per Primary Color
+//    1 1 0 - 16 Bits per Primary Color
+//    1 1 1 - Reserved
+ 
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read the EDID from the LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL.
+#define LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates at usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP, no change compared to the previous version
+
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE                  0x00       // no eDP->LVDS translator chip
+#define eDP_TO_LVDS_COMMON_ID                   0x01       // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID                       0x02       // RT translator which requires AMD SW init
+
+typedef struct  _ATOM_PATCH_RECORD_MODE
+{
+  UCHAR     ucRecordType;
+  USHORT    usHDisp;
+  USHORT    usVDisp;
+}ATOM_PATCH_RECORD_MODE;
+
+typedef struct  _ATOM_LCD_RTS_RECORD
+{
+  UCHAR     ucRecordType;
+  UCHAR     ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
+
+//!! If the record below exists, it should always be the first record for easy use in command tables!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct  _ATOM_LCD_MODE_CONTROL_CAP
+{
+  UCHAR     ucRecordType;
+  USHORT    usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF                   1
+#define LCD_MODE_CAP_CRTC_OFF                 2
+#define LCD_MODE_CAP_PANEL_OFF                4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+  UCHAR ucRecordType;
+  UCHAR ucFakeEDIDLength;
+  UCHAR ucFakeEDIDString[1];    // This actually has ucFakeEDIDLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct  _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+   UCHAR    ucRecordType;
+   USHORT   usHSize;
+   USHORT   usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE       1
+#define LCD_RTS_RECORD_TYPE                   2
+#define LCD_CAP_RECORD_TYPE                   3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE     6
+#define ATOM_RECORD_END_TYPE                  0xFF
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+  USHORT              usSpreadSpectrumPercentage; 
+  UCHAR               ucSpreadSpectrumType;     //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Bit2=1: PCIE REFCLK SS, =0: internal PPLL SS. Others: TBD
+  UCHAR               ucSS_Step;
+  UCHAR               ucSS_Delay;
+  UCHAR               ucSS_Id;
+  UCHAR               ucRecommendedRef_Div;
+  UCHAR               ucSS_Range;               //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY                      16
+#define ATOM_DP_SS_ID1                         0x0f1      // SS ID for internal DP stream at 2.7GHz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for the internal DP stream at 1.62GHz as well.
+#define ATOM_DP_SS_ID2                         0x0f2      // SS ID for internal DP stream at 1.62GHz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID              0x0f3      // SS ID for LV link translator chip at 2.7GHz
+#define ATOM_LVLINK_1620MHz_SS_ID              0x0f4      // SS ID for LV link translator chip at 1.62GHz
+
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+#define ATOM_INTERNAL_SS_MASK                  0x00000000
+#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT                2
+#define EXEC_SS_DELAY_SHIFT                    4    
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT         4
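+
+/* Illustrative sketch (not part of the original header): classifying a
+   ucSpreadSpectrumType byte with the mode masks above. Helper names are
+   hypothetical. */
+static inline int atom_ss_is_centre_spread(UCHAR ucType)
+{
+  return (ucType & ATOM_SS_CENTRE_SPREAD_MODE_MASK) == ATOM_SS_CENTRE_SPREAD_MODE;
+}
+static inline int atom_ss_is_external(UCHAR ucType)
+{
+  return (ucType & ATOM_EXTERNAL_SS_MASK) != 0;
+}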
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_SPREAD_SPECTRUM_ASSIGNMENT   asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/	
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/	
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC                1
+//ATOM_TV_NTSCJ               2
+//ATOM_TV_PAL                 3
+//ATOM_TV_PALM                4
+//ATOM_TV_PALCN               5
+//ATOM_TV_PALN                6
+//ATOM_TV_PAL60               7
+//ATOM_TV_SECAM               8
+
+//ucTVSupportedStd definition:
+#define NTSC_SUPPORT          0x1
+#define NTSCJ_SUPPORT         0x2
+
+#define PAL_SUPPORT           0x4
+#define PALM_SUPPORT          0x8
+#define PALCN_SUPPORT         0x10
+#define PALN_SUPPORT          0x20
+#define PAL60_SUPPORT         0x40
+#define SECAM_SUPPORT         0x80
+
+#define MAX_SUPPORTED_TV_TIMING    2
+
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  /*ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+  ATOM_MODE_TIMING         aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
+
+#define MAX_SUPPORTED_TV_TIMING_V1_2    3
+
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+  UCHAR   ucRevisionNumber;        //10h : Revision 1.0; 11h : Revision 1.1   
+  UCHAR   ucMaxLinkRate;           //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+  UCHAR   ucMaxLane;               //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP 
+  UCHAR   ucMaxDownSpread;         //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK    0x1F
+
+/**************************************************************************/
+// VRAM usage and related definitions
+
+// One chunk of VRAM used by the BIOS holds the HWICON surfaces, EDID data, and
+// the current mode timing plus detailed and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: the physical address of this memory portion = mmFB_START(4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR (sketched below)
+// To BIOS:  ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
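+
+/* Illustrative sketch (not part of the original header): the driver-side
+   address computation from the note above. The parameters are hypothetical
+   inputs (mmFB_START and ATOMBIOS_VRAM_USAGE_START_ADDR are resolved
+   elsewhere). */
+static inline ULONG atom_fb_physical_addr(ULONG ulFbStart, ULONG ulVramUsageStart, ULONG ulAtomXAddr)
+{
+  /* physical address = mmFB_START (4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR */
+  return ulFbStart + ulVramUsageStart + ulAtomXAddr;
+}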
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK        0x100       //256*64K=16MB (Max. VESA memory is 16MB!)
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE          256         //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE        4096        //In Bytes
+#define ATOM_HWICON_INFOTABLE_SIZE      32
+#define MAX_DTD_MODE_IN_VRAM            6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  (32*8)                       //32 is a predefined number, 8 = (SIZEOF ATOM_STD_FORMAT)
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
+
+#define ATOM_HWICON1_SURFACE_ADDR       0
+#define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR      (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR             (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR     (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR	    (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR             (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR     (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR   	(ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR      (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR             (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR     (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR	    (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR             (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR     (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR	    (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR             (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR     (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR   	(ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP6_EDID_ADDR             (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR     (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR     (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR             (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR     (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR     (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR               (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR       (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR       (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR             (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR     (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR     (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR             (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR     (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR     (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR             (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
+
+//The size below is in KB!
+#define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+   
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
+#define	ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
+#define	ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
+#define	ATOM_VRAM_BLOCK_NEEDS_RESERVATION      0x0
+
+/***********************************************************************************/	
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+//        at run time.
+// Note2: From RV770 on, the memory is more than 32-bit addressable, so we will change
+//        ucTableFormatRevision=1, ucTableContentRevision=4; the structure remains
+//        exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+//        (an offset from the start of the memory address) is KB aligned instead of byte aligned.
+/***********************************************************************************/	
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to the VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+if (ulStartAddrUsedByFirmware != 0)
+    FBAccessAreaOffset = ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+    // The reserved area has been claimed by the VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose.
+else    // non-VGA case
+    if (FB_Size <= 2GB)
+        FBAccessAreaOffset = FB_Size - usFBUsedbyDrvInKB;
+    else
+        FBAccessAreaOffset = Aper_Size - usFBUsedbyDrvInKB;
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case (see the sketch below). */
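+
+/* Illustrative sketch (not part of the original header): the CAIL offset
+   computation from the note above, written as real C. Parameter names are
+   hypothetical; all quantities are assumed to be byte offsets/sizes. */
+static inline ULONG cail_fb_access_area_offset(ULONG ulStartAddrUsedByFirmware,
+                                               ULONG ulFBUsedByDrv,
+                                               ULONG ulFBSize, ULONG ulAperSize)
+{
+  if (ulStartAddrUsedByFirmware != 0)       /* VGA case: VBIOS already reserved it */
+    return ulStartAddrUsedByFirmware - ulFBUsedByDrv;
+  else if (ulFBSize <= 0x80000000UL)        /* non-VGA case, FB_Size <= 2GB */
+    return ulFBSize - ulFBUsedByDrv;
+  else
+    return ulAperSize - ulFBUsedByDrv;
+}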
+
+/***********************************************************************************/	
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO			1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+// Changed version to 1.5 to allow the driver to allocate the VRAM area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/	
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+  USHORT                   usGpioPin_AIndex;
+  UCHAR                    ucGpioPinBitShift;
+  UCHAR                    ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
+
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+  ATOM_COMMON_TABLE_HEADER  sHeader;
+  ATOM_GPIO_PIN_ASSIGNMENT	asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/	
+// Structure used in ComponentVideoInfoTable	
+/****************************************************************************/	
+#define GPIO_PIN_ACTIVE_HIGH          0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS    5
+
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK  0x1F    // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK  0x60    // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK    0x80    // [7]
+
+typedef struct _ATOM_GPIO_INFO
+{
+  USHORT  usAOffset;
+  UCHAR   ucSettings;
+  UCHAR   ucReserved;
+}ATOM_GPIO_INFO;
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION           0x2
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN                   0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK              0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 outputs 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A       0x01     //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B       0x02     //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT   0x0   
+
+//Line 3 out put 2.2V              
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04     //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08     //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2     
+
+//Line 3 out put 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A        0x10     //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B        0x20     //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT    0x4 
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK              0x3F     // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST             0x80     //bit 7
+
+//GPIO bit index in the per-mode GPIO setting value; also represents the block no. in the GPIO blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3   //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default GPIO bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4   //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default GPIO bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT             usMask_PinRegisterIndex;
+  USHORT             usEN_PinRegisterIndex;
+  USHORT             usY_PinRegisterIndex;
+  USHORT             usA_PinRegisterIndex;
+  UCHAR              ucBitShift;
+  UCHAR              ucPinActiveState;  //ucPinActiveState: Bit0=1 active high, =0 active low
+  ATOM_DTD_FORMAT    sReserved;         // must be zeroed out
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucReserved[3];
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucReserved;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/	
+// Structure used in object_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_OBJECT_HEADER
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+  USHORT                    usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct  _ATOM_DISPLAY_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[1];                            //from the 1st encoder object (sourced from the GPU) to the last graphic object (terminating at the connector).
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+  UCHAR                           ucNumOfDispPath;
+  UCHAR                           ucVersion;
+  UCHAR                           ucPadding[2];
+  ATOM_DISPLAY_OBJECT_PATH        asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT                                //each object has this structure    
+{
+  USHORT              usObjectID;
+  USHORT              usSrcDstTableOffset;
+  USHORT              usRecordOffset;                     //this points to a set of records defined below
+  USHORT              usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE                         //Each of the four object table offsets above points to a group of objects, all of which have this structure
+{
+  UCHAR               ucNumberOfObjects;
+  UCHAR               ucPadding[3];
+  ATOM_OBJECT         asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT         //usSrcDstTableOffset points to this structure
+{
+  UCHAR               ucNumberOfSrc;
+  USHORT              usSrcObjectID[1];
+  UCHAR               ucNumberOfDst;
+  USHORT              usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
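+
+/* Both arrays above are variable-length, so the declared struct only documents the layout;
+   a reader has to step past the source list to reach the destination list. A walking sketch
+   (assumes the table is byte-packed exactly as declared): */
+static const USHORT *
+atom_src_dst_get_dst_list(const ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *t, UCHAR *num_dst)
+{
+  const UCHAR *p = (const UCHAR *)t;
+
+  p += 1;                                   /* skip ucNumberOfSrc */
+  p += t->ucNumberOfSrc * sizeof(USHORT);   /* skip usSrcObjectID[] */
+  *num_dst = *p++;                          /* read ucNumberOfDst */
+  return (const USHORT *)p;                 /* usDstObjectID[] starts here */
+}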
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0                   0
+#define EXT_HPDPIN_LUTINDEX_1                   1
+#define EXT_HPDPIN_LUTINDEX_2                   2
+#define EXT_HPDPIN_LUTINDEX_3                   3
+#define EXT_HPDPIN_LUTINDEX_4                   4
+#define EXT_HPDPIN_LUTINDEX_5                   5
+#define EXT_HPDPIN_LUTINDEX_6                   6
+#define EXT_HPDPIN_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES   (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0                   0
+#define EXT_AUXDDC_LUTINDEX_1                   1
+#define EXT_AUXDDC_LUTINDEX_2                   2
+#define EXT_AUXDDC_LUTINDEX_3                   3
+#define EXT_AUXDDC_LUTINDEX_4                   4
+#define EXT_AUXDDC_LUTINDEX_5                   5
+#define EXT_AUXDDC_LUTINDEX_6                   6
+#define EXT_AUXDDC_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
+
+//ucChannelMapping is defined as follows
+//for DP connector, eDP, DP to VGA/LVDS 
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
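+
+/* A decoding sketch for the bit layout documented above that avoids relying on the
+   compiler's bit-field ordering; lane is 0-3 and the result is the TX pin index (0-3). */
+static UCHAR atom_dp_lane_source(UCHAR ucChannelMapping, UCHAR lane)
+{
+  return (ucChannelMapping >> (lane * 2)) & 0x3;
+}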
+
+//for DVI/HDMI, in the dual-link case both links must use the same mapping. 
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
+typedef struct _EXT_DISPLAY_PATH
+{
+  USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
+  USHORT  usDeviceACPIEnum;               //16bit device ACPI id. 
+  USHORT  usDeviceConnector;              //A physical connector for displays to plug in, using object connector definitions
+  UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
+  UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
+  USHORT  usExtEncoderObjId;              //external encoder object id
+  union{
+    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, use the default one-to-one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucChPNInvert;                   // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
+  USHORT  usCaps;
+  USHORT  usReserved; 
+}EXT_DISPLAY_PATH;
+   
+#define NUMBER_OF_UCHAR_FOR_GUID          16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
+
+//usCaps
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE          0x01
+
+typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16-byte-long string
+  EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // a fixed total of 7 entries.
+  UCHAR                    ucChecksum;                            // a simple checksum: the byte sum of the whole structure equals 0x0. 
+  UCHAR                    uc3DStereoPinId;                       // used for eDP panel
+  UCHAR                    ucRemoteDisplayConfig;
+  UCHAR                    uceDPToLVDSRxId;
+  UCHAR                    Reserved[4];                           // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
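+
+/* ucChecksum is chosen so that the byte sum of the whole structure is 0x0; a verification
+   sketch (illustrative helper, reading the table as raw bytes): */
+static int
+atom_ext_disp_conn_info_valid(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
+{
+  const UCHAR *p = (const UCHAR *)info;
+  UCHAR sum = 0;
+  USHORT i;
+
+  for (i = 0; i < sizeof(*info); i++)
+    sum += p[i];
+  return sum == 0;                          /* a non-zero sum means a corrupt table */
+}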
+
+//Related definitions; all records are different, but they share a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR               ucRecordType;                      //An enum to indicate the record type
+  UCHAR               ucRecordSize;                      //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE                           1         
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define	ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE	     5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE               10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                 11
+#define	ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE	      12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE  13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE	      14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE	15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE          16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under an object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
+
+
+//Must be updated when a new record type is added; keep equal to the last record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
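+
+/* Records are laid out back to back starting at usRecordOffset, and ucRecordSize covers the
+   whole record including its header. A walking sketch; the stop conditions mirror common
+   driver practice and are an assumption here, not something this header mandates. */
+static const ATOM_COMMON_RECORD_HEADER *
+atom_next_record(const ATOM_COMMON_RECORD_HEADER *rec)
+{
+  if (rec->ucRecordSize == 0 || rec->ucRecordType == 0 ||
+      rec->ucRecordType > ATOM_MAX_OBJECT_RECORD_NUMBER)
+    return 0;                               /* end of the record list */
+  return (const ATOM_COMMON_RECORD_HEADER *)((const UCHAR *)rec + rec->ucRecordSize);
+}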
+
+typedef struct  _ATOM_I2C_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          sucI2cId; 
+  UCHAR                       ucI2CAddr;              //The slave address; it's 0 when the record is attached to a connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct  _ATOM_HPD_INT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDIntGPIOID;         //Corresponding block in GPIO_PIN_INFO table gives the pin info           
+  UCHAR                       ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct  _ATOM_OUTPUT_PROTECTION_RECORD 
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucProtectionFlag;
+  UCHAR                       ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG
+{
+  ULONG                       ulACPIDeviceEnum;       //Reserved for now
+  USHORT                      usDeviceID;             //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+  USHORT                      usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucNumberOfDevice;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[1];         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct  _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucConfigGPIOID;
+  UCHAR                       ucConfigGPIOState;      //Set to 1 when it's active high to enable external flow in
+  UCHAR                       ucFlowinGPIPID;
+  UCHAR                       ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct  _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucCTL1GPIO_ID;
+  UCHAR                       ucCTL1GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL2GPIO_ID;
+  UCHAR                       ucCTL2GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL3GPIO_ID;
+  UCHAR                       ucCTL3GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTLFPGA_IN_ID;
+  UCHAR                       ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucGPIOID;               //Corresponding block in GPIO_PIN_INFO table gives the pin info 
+  UCHAR                       ucTVActiveState;        //Indicates whether the pin is 0 or 1 when a TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct  _ATOM_JTAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucTMSGPIO_ID;
+  UCHAR                       ucTMSGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTCKGPIO_ID;
+  UCHAR                       ucTCKGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDOGPIO_ID;
+  UCHAR                       ucTDOGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDIGPIO_ID;
+  UCHAR                       ucTDIGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object GPIO pin control record type will gradually replace the JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD types above
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+  UCHAR                       ucGPIOID;               // GPIO_ID, find the corresponding ID in GPIO_LUT table
+  UCHAR                       ucGPIO_PinState;        // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucFlags;                // Future expandability
+  UCHAR                       ucNumberOfPins;         // Number of GPIO pins used to control the object
+  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[1];              // the actual GPIO pin pairs; the count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state 
+#define GPIO_PIN_TYPE_INPUT             0x00
+#define GPIO_PIN_TYPE_OUTPUT            0x10
+#define GPIO_PIN_TYPE_HW_CONTROL        0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined 
+#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
+#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
+
+// Indexes to GPIO array in GLSync record 
+// GLSync record is for Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ  3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL  8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX       9
+
+typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ULONG                       ulStrengthControl;      // DVOA strength control for CF
+  UCHAR                       ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled 
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;         
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in the future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in the future
+#endif
+    };
+  }; 
+}ATOM_ENCODER_CAP_RECORD;                             
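+
+/* The same bits can be tested through usEncoderCap with the ATOM_ENCODER_CAP_RECORD_* masks
+   above, sidestepping bit-field layout entirely; a sketch (assumes usEncoderCap is already
+   in host byte order): */
+static int atom_encoder_hbr2_usable(const ATOM_ENCODER_CAP_RECORD *rec)
+{
+  return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) &&
+         (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2_EN);
+}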
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
+
+typedef struct  _ATOM_CONNECTOR_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usMaxPixClk;
+  UCHAR                       ucFlowCntlGpioId;
+  UCHAR                       ucSwapCntlGpioId;
+  UCHAR                       ucConnectedDvoBundle;
+  UCHAR                       ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_DTD_FORMAT             asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;                //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+  UCHAR                       ucSubConnectorType;     //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+  UCHAR                       ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucMuxType;              //decides the number of ucMuxState: =0: no pin state, =1: single state with complement, >1: multiple states
+  UCHAR                       ucMuxControlPin;
+  UCHAR                       ucMuxState[2];          //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucMuxType;
+  UCHAR                       ucMuxControlPin;
+  UCHAR                       ucMuxState[2];          //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK								0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT		0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD     //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table 
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD  //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //A fixed-size array which maps external pins to internal DDC IDs
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usObjectID;         //could be a connector, encoder or other object defined in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/	
+// ASIC voltage data table
+/****************************************************************************/	
+typedef struct  _ATOM_VOLTAGE_INFO_HEADER
+{
+   USHORT   usVDDCBaseLevel;                //In units of 50mV
+   USHORT   usReserved;                     //For possible extension table offset
+   UCHAR    ucNumOfVoltageEntries;
+   UCHAR    ucBytesPerVoltageEntry;
+   UCHAR    ucVoltageStep;                  //Indicates the mV increment of one step, in 0.5mV units
+   UCHAR    ucDefaultVoltageEntry;
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct  _ATOM_VOLTAGE_INFO
+{
+   ATOM_COMMON_TABLE_HEADER	sHeader; 
+   ATOM_VOLTAGE_INFO_HEADER viHeader;
+   UCHAR    ucVoltageEntries[64];            //64 is for allocation; the actual size is ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct  _ATOM_VOLTAGE_FORMULA
+{
+   USHORT   usVoltageBaseLevel;             // In units of 1mV
+   USHORT   usVoltageStep;                  // Indicates the mV increment of one step, in 1mV units
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+   UCHAR    ucFlag;                         // bit0=0: step is 1mV, =1: 0.5mV
+   UCHAR    ucBaseVID;                      // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+   UCHAR    ucReserved;
+   UCHAR    ucVIDAdjustEntries[32];         // 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
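+
+/* A sketch of the conversion spelled out for ucBaseVID (no-lookup-table case); voltages are
+   in the unit selected by ucFlag bit0. */
+static UCHAR atom_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT usVoltage)
+{
+  return f->ucBaseVID + (UCHAR)((usVoltage - f->usVoltageBaseLevel) / f->usVoltageStep);
+}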
+
+typedef struct  _VOLTAGE_LUT_ENTRY
+{
+   USHORT   usVoltageCode;                  // The Voltage ID, either GPIO or I2C code
+   USHORT   usVoltageValue;                 // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct  _ATOM_VOLTAGE_FORMULA_V2
+{
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+   UCHAR    ucReserved[3];
+   VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+  UCHAR    ucVoltageControlId;              //Indicates whether it is controlled by I2C, GPIO or a HW state machine
+  UCHAR    ucVoltageControlI2cLine;
+  UCHAR    ucVoltageControlAddress;
+  UCHAR    ucVoltageControlOffset;
+  USHORT   usGpioPin_AIndex;                //GPIO_PAD register index
+  UCHAR    ucGpioPinBitShift[9];            //at most 8 pins supporting 255 VIDs, terminated with 0xff
+  UCHAR    ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define	VOLTAGE_CONTROLLED_BY_HW							0x00
+#define	VOLTAGE_CONTROLLED_BY_I2C_MASK				0x7F
+#define	VOLTAGE_CONTROLLED_BY_GPIO						0x80
+#define	VOLTAGE_CONTROL_ID_LM64								0x01									//I2C control, used for R5xx Core Voltage
+#define	VOLTAGE_CONTROL_ID_DAC								0x02									//I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define	VOLTAGE_CONTROL_ID_VT116xM						0x03									//I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402							0x04									
+#define VOLTAGE_CONTROL_ID_UP6266 						0x05									
+#define VOLTAGE_CONTROL_ID_SCORPIO						0x06
+#define	VOLTAGE_CONTROL_ID_VT1556M						0x07									
+#define	VOLTAGE_CONTROL_ID_CHL822x						0x08									
+#define	VOLTAGE_CONTROL_ID_VT1586M						0x09
+#define VOLTAGE_CONTROL_ID_UP1637 						0x0A
+
+typedef struct  _ATOM_VOLTAGE_OBJECT
+{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the object
+   ATOM_VOLTAGE_CONTROL      asControl;     //describes how to control
+   ATOM_VOLTAGE_FORMULA      asFormula;     //indicates how to convert a real voltage to a VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_V2
+{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the object
+   ATOM_VOLTAGE_CONTROL      asControl;     //describes how to control
+   ATOM_VOLTAGE_FORMULA_V2   asFormula;     //indicates how to convert a real voltage to a VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO
+{
+   ATOM_COMMON_TABLE_HEADER  sHeader;
+   ATOM_VOLTAGE_OBJECT       asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+   ATOM_COMMON_TABLE_HEADER  sHeader;
+   ATOM_VOLTAGE_OBJECT_V2    asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct  _ATOM_LEAKID_VOLTAGE
+{
+   UCHAR    ucLeakageId;
+   UCHAR    ucReserved;
+   USHORT   usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucVoltageMode;                  //Indicates the voltage control mode: Init/Set/Leakage/Set phase
+   USHORT   usSize;                         //Size of the object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+typedef struct  _VOLTAGE_LUT_ENTRY_V2
+{
+   ULONG    ulVoltageId;                    // The Voltage ID which is used to program the GPIO register
+   USHORT   usVoltageValue;                 // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT  usVoltageLevel;                   // The voltage level, in mV
+  USHORT  usVoltageId;                      // The Voltage ID which is used to program the GPIO register
+  USHORT  usLeakageId;                      // The corresponding leakage ID
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR    ucVoltageRegulatorId;          //Indicates the voltage regulator ID
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+   ULONG    ulReserved;
+   VOLTAGE_LUT_ENTRY asVolI2cLut[1];       // the list is terminated with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   
+   UCHAR    ucVoltageGpioCntlId;         // default is 0, which indicates control through CG VID mode 
+   UCHAR    ucGpioEntryNum;              // indicates the number of entries in the Voltage/GPIO value lookup table
+   UCHAR    ucPhaseDelay;                // phase delay in units of microseconds
+   UCHAR    ucReserved;   
+   ULONG    ulGpioMaskVal;               // GPIO Mask value
+   VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];   
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR    ucLeakageCntlId;             // default is 0
+   UCHAR    ucLeakageEntryNum;           // indicates the number of entries in the LeakageId/Voltage LUT
+   UCHAR    ucReserved[2];               
+   ULONG    ulMaxVoltageLevel;
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];   
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+   ATOM_COMMON_TABLE_HEADER  sHeader;
+   ATOM_VOLTAGE_OBJECT_V3    asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
+
+typedef struct  _ATOM_ASIC_PROFILE_VOLTAGE
+{
+   UCHAR    ucProfileId;
+   UCHAR    ucReserved;
+   USHORT   usSize;
+   USHORT   usEfuseSpareStartAddr;
+   USHORT   usFuseIndex[8];                 //from LSB to MSB, max 8 bits; terminated with 0xffff if there are fewer than 8 efuse IDs
+   ATOM_LEAKID_VOLTAGE  asLeakVol[2];       //Leakage ID and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE			1		
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE			1
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE					2
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO
+{
+  ATOM_COMMON_TABLE_HEADER    asHeader;
+  ATOM_ASIC_PROFILE_VOLTAGE   asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+   UCHAR    ucPwrSrcId;                     // Power source
+   UCHAR    ucPwrSensorType;                // GPIO, I2C or none
+   UCHAR    ucPwrSensId;                    // if GPIO detect, it is the GPIO id; if I2C detect, it is the I2C id
+   UCHAR    ucPwrSensSlaveAddr;             // Slave address if I2C detect
+   UCHAR    ucPwrSensRegIndex;              // I2C register index if I2C detect
+   UCHAR    ucPwrSensRegBitMask;            // which bit to test if I2C detect
+   UCHAR    ucPwrSensActiveState;           // active high or active low
+   UCHAR    ucReserve[3];                   // reserved
+   USHORT   usSensPwr;                      // in units of watts
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+   ATOM_COMMON_TABLE_HEADER    asHeader;
+   UCHAR                       asPwrbehave[16];
+   ATOM_POWER_SOURCE_OBJECT    asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1						0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1	0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1	0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2	0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2	0x08
+
+//define ucPwrSensorId
+#define POWER_SENSOR_ALWAYS							0x00
+#define POWER_SENSOR_GPIO								0x01
+#define POWER_SENSOR_I2C								0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;          
+  ULONG  ulBootUpUMAClock;          
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;   
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;           
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;              
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
+  ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
+  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15]; 
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;   
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+//ucLVDSMisc:                   
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE                                             0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                                                0x02
+#define SYS_INFO_LVDSMISC__888_BPC                                                   0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                                               0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW                                           0x10
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW                                          0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW                                          0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz units. 
+ulBootUpUMAClock:                 System memory bootup clock frequency in 10kHz units. 
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following devices are supported in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               Other display related flags, not defined yet. 
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). 
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+                                  
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments, 
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it's per platform, 
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt. 
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels. 
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for the CSR M3 arbiter for the default case
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for the CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high  
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz. 
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not including eDP ) Spread Spectrum Percentage in units of 0.01%; =0: use VBIOS default setting. 
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not including eDP ) Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting. 
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting. 
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting. 
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting. 
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting. 
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower and upper links not swapped, =1: lower and upper links are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bits per color, =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used, =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of the signal sent to the digital BLON output pin. =0: not inverted (active high), =1: inverted (active low)
+**********************************************************************************************************************/
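+
+/* usExtDispConnInfoOffset is a byte offset from the start of the table; a sketch of how a
+   driver could locate sExtDispConnInfo through it (illustrative helper): */
+static const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *
+atom_v6_ext_disp_conn_info(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
+{
+  return (const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *)
+         ((const UCHAR *)info + info->usExtDispConnInfoOffset);
+}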
+
+// this Table is used for the Llano/Ontario APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6    sIntegratedSysInfo;   
+  ULONG  ulPowerplayTable[128];  
+}ATOM_FUSION_SYSTEM_INFO_V1; 
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:               refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]:            These 512 bytes are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]    
+**********************************************************************************************************************/ 
+
+// this IntegratedSystemInfoTable is used for the Trinity APU
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ULONG  ulReserved[20];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSReserved1;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;               
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE                0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                               0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT                         0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz units. 
+ulBootUpUMAClock:                 System memory bootup clock frequency in 10kHz units. 
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following devices are supported in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS. 
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS. 
+                                  bit[1]=0: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every set mode if an LCD panel is connected and the LID is open)
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[1]=0: DP mode uses cascade PLL mode ( New for Trinity )
+                                        =1: DP mode uses single PLL mode
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                      
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). 
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+                                  
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments, 
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it's per platform, 
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt. 
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usNBP2Voltage:                    VID for voltage on NB P2 State
+usNBP3Voltage:                    VID for voltage on NB P3 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels. 
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for the CSR M3 arbiter for the default case
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for the CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high  
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz. 
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel (not including eDP) Spread Spectrum Percentage in units of 0.01%; =0 means use the VBIOS default setting. 
+usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. 
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting. 
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. 
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting. 
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. 
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; if =0, VBIOS uses the default threshold, which is currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:    LVDS power up sequence time in units of 4ms, time delay from DIGON signal active to data enable signal active (DE).
+                                  =0 means use the VBIOS default, which is 8 (32ms). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON. 
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnDEtoVARY_BL_in4Ms:     LVDS power up sequence time in units of 4ms, time delay from DE (data enable) active to vary brightness enable signal active (VARY_BL).  
+                                  =0 means use the VBIOS default, which is 90 (360ms). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON. 
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffVARY_BLtoDE_in4Ms:    LVDS power down sequence time in units of 4ms, time delay from data enable (DE) signal off to LCDVCC (DIGON) off. 
+                                  =0 means use the VBIOS default delay, which is 8 (32ms). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffDEtoDIGON_in4Ms:      LVDS power down sequence time in units of 4ms, time delay from vary brightness enable signal (VARY_BL) off to data enable (DE) signal off. 
+                                  =0 means use the VBIOS default, which is 90 (360ms). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms:         LVDS off-to-on delay in units of 4ms. Time delay from DIGON signal off to DIGON signal active. 
+                                  =0 means use the VBIOS default delay, which is 125 (500ms).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnVARY_BLtoBLON_in4Ms:   LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active. 
+                                  =0 means use the VBIOS default delay, which is 0 (0ms).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffBLONtoVARY_BL_in4Ms:  LVDS power down sequence time in units of 4ms. Time delay from BLON signal off to VARY_BL signal off. 
+                                  =0 means use the VBIOS default delay, which is 0 (0ms).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency, in units of 10kHz, for each NB P-state. 
+
+**********************************************************************************************************************/
+
+/**************************************************************************/
+// This portion is only used when an external thermal chip or an engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
+#define ICS91719  1
+#define ICS91720  2
+
+//Define one structure to inform SW of a "block of data" to write to the external SS chip via the I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+  UCHAR         ucNunberOfBytes;                                              //Indicates how many bytes SW needs to write to the external ASIC for one block, besides "Start" and "Stop"
+  UCHAR         ucI2CData[1];                                                 //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data to write to the external SS chip via the I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+  ATOM_I2C_ID_CONFIG_ACCESS       sucI2cId;               //I2C line and HW/SW assisted cap.
+  UCHAR		                        ucSSChipID;             //SS chip being used
+  UCHAR		                        ucSSChipSlaveAddr;      //Slave Address to set up this SS chip
+  UCHAR                           ucNumOfI2CDataRecords;  //number of data blocks
+  ATOM_I2C_DATA_RECORD            asI2CData[1];  
+}ATOM_I2C_DEVICE_SETUP_INFO;
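+
+// Illustrative sketch, not part of the original header: asI2CData is a packed,
+// variable-length list of variable-length records, so a consumer has to walk it
+// record by record; each record occupies one length byte plus its payload.
+// send_i2c_block() is a hypothetical transport routine supplied by the caller.
+static void atom_write_ss_chip(const ATOM_I2C_DEVICE_SETUP_INFO *pSetup,
+                               void (*send_i2c_block)(UCHAR ucSlaveAddr,
+                                                      const UCHAR *pucBuf, UCHAR ucLen))
+{
+  const UCHAR *pucRec = (const UCHAR *)pSetup->asI2CData;
+  UCHAR i;
+
+  for (i = 0; i < pSetup->ucNumOfI2CDataRecords; i++) {
+    const ATOM_I2C_DATA_RECORD *pRec = (const ATOM_I2C_DATA_RECORD *)pucRec;
+
+    send_i2c_block(pSetup->ucSSChipSlaveAddr, pRec->ucI2CData, pRec->ucNunberOfBytes);
+    pucRec += sizeof(UCHAR) + pRec->ucNunberOfBytes;   /* length byte + payload */
+  }
+}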
+
+//==========================================================================================
+typedef struct  _ATOM_ASIC_MVDD_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
+#define ATOM_MCLK_SS_INFO         ATOM_ASIC_MVDD_INFO
+
+//==========================================================================================
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+	ULONG								ulTargetClockRange;						//Clock out frequency (VCO), in units of 10kHz
+  USHORT              usSpreadSpectrumPercentage;		//in units of 0.01%
+	USHORT							usSpreadRateInKhz;						//in units of kHz, modulation frequency
+  UCHAR               ucClockIndication;					  //Indicates which clock source needs SS
+	UCHAR								ucSpreadSpectrumMode;					//Bit1=0: Down Spread, =1: Center Spread.
+	UCHAR								ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS			1
+#define ASIC_INTERNAL_ENGINE_SS			2
+#define ASIC_INTERNAL_UVD_SS        3
+#define ASIC_INTERNAL_SS_ON_TMDS    4
+#define ASIC_INTERNAL_SS_ON_HDMI    5
+#define ASIC_INTERNAL_SS_ON_LVDS    6
+#define ASIC_INTERNAL_SS_ON_DP      7
+#define ASIC_INTERNAL_SS_ON_DCPLL   8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
+#define ASIC_INTERNAL_VCE_SS        10
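+
+// Illustrative sketch, not part of the original header: a driver-side search of
+// an SS assignment table for one of the clock indication IDs above. As noted,
+// SS is not required or enabled when no entry matches.
+static const ATOM_ASIC_SS_ASSIGNMENT *
+atom_find_ss_assignment(const ATOM_ASIC_SS_ASSIGNMENT *pEntries, int iCount,
+                        UCHAR ucClockId)
+{
+  int i;
+
+  for (i = 0; i < iCount; i++) {
+    if (pEntries[i].ucClockIndication == ucClockId)
+      return &pEntries[i];
+  }
+  return (const ATOM_ASIC_SS_ASSIGNMENT *)0;   /* no match: SS not required */
+}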
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+	ULONG								ulTargetClockRange;						//For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+                                                    //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock (27000 or 16200)
+  USHORT              usSpreadSpectrumPercentage;		//in units of 0.01%
+	USHORT							usSpreadRateIn10Hz;						//in units of 10Hz, modulation frequency
+  UCHAR               ucClockIndication;					  //Indicates which clock source needs SS
+	UCHAR								ucSpreadSpectrumMode;					//Bit0=0: Down Spread, =1: Center Spread; Bit1=0: internal SS, =1: external SS
+	UCHAR								ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+//#define ATOM_INTERNAL_SS_MASK                  0x00000000
+//#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT		      asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V2		  asSpreadSpectrum[1];      //declared with one entry only; the actual table length varies. 
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+	ULONG								ulTargetClockRange;						//For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+                                                    //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock (27000 or 16200)
+  USHORT              usSpreadSpectrumPercentage;		//in units of 0.01%
+	USHORT							usSpreadRateIn10Hz;						//in units of 10Hz, modulation frequency
+  UCHAR               ucClockIndication;					  //Indicates which clock source needs SS
+	UCHAR								ucSpreadSpectrumMode;					//Bit0=0: Down Spread, =1: Center Spread; Bit1=0: internal SS, =1: external SS
+	UCHAR								ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V3		  asSpreadSpectrum[1];      //declared with one entry only; the actual table length varies. 
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
+#define ATOM_DEVICE_CONNECT_INFO_DEF  0
+#define ATOM_ROM_LOCATION_DEF         1
+#define ATOM_TV_STANDARD_DEF          2
+#define ATOM_ACTIVE_INFO_DEF          3
+#define ATOM_LCD_INFO_DEF             4
+#define ATOM_DOS_REQ_INFO_DEF         5
+#define ATOM_ACC_CHANGE_INFO_DEF      6
+#define ATOM_DOS_MODE_INFO_DEF        7
+#define ATOM_I2C_CHANNEL_STATUS_DEF   8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF  9
+#define ATOM_INTERNAL_TIMER_DEF       10
+
+// BIOS_0_SCRATCH Definition 
+#define ATOM_S0_CRT1_MONO               0x00000001L
+#define ATOM_S0_CRT1_COLOR              0x00000002L
+#define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A         0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A            0x00000008L
+#define ATOM_S0_TV1_MASK_A              (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A                    0x00000010L
+#define ATOM_S0_CV_DIN_A                0x00000020L
+#define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+
+#define ATOM_S0_CRT2_MONO               0x00000100L
+#define ATOM_S0_CRT2_COLOR              0x00000200L
+#define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE           0x00000400L
+#define ATOM_S0_TV1_SVIDEO              0x00000800L
+#define ATOM_S0_TV1_SCART               0x00004000L
+#define ATOM_S0_TV1_MASK                (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV                      0x00001000L
+#define ATOM_S0_CV_DIN                  0x00002000L
+#define ATOM_S0_CV_MASK                 (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1                    0x00010000L
+#define ATOM_S0_DFP2                    0x00020000L
+#define ATOM_S0_LCD1                    0x00040000L
+#define ATOM_S0_LCD2                    0x00080000L
+#define ATOM_S0_DFP6                    0x00100000L
+#define ATOM_S0_DFP3                    0x00200000L
+#define ATOM_S0_DFP4                    0x00400000L
+#define ATOM_S0_DFP5                    0x00800000L
+
+#define ATOM_S0_DFP_MASK                (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
+
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running on a PCIE ASIC with 
+                                                    // the FAD/HDP reg access bug. Bit is read by DAL; obsolete from RV5xx onward.
+
+#define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT     26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
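+
+// Illustrative sketch, not part of the original header: the scratch register
+// fields above follow a MASK/SHIFT convention, so a field is read by masking
+// first and shifting second. ulBios0Scratch stands for a value already read
+// from the BIOS_0_SCRATCH register.
+static UCHAR atom_s0_system_power_state(ULONG ulBios0Scratch)
+{
+  /* result is one of ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC/_DC/_LITEAC/_LIT2AC */
+  return (UCHAR)((ulBios0Scratch & ATOM_S0_SYSTEM_POWER_STATE_MASK) >>
+                 ATOM_S0_SYSTEM_POWER_STATE_SHIFT);
+}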
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S0_CRT1_MONOb0             0x01
+#define ATOM_S0_CRT1_COLORb0            0x02
+#define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0         0x04
+#define ATOM_S0_TV1_SVIDEOb0            0x08
+#define ATOM_S0_TV1_MASKb0              (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0                    0x10
+#define ATOM_S0_CV_DINb0                0x20
+#define ATOM_S0_CV_MASKb0               (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1             0x01
+#define ATOM_S0_CRT2_COLORb1            0x02
+#define ATOM_S0_CRT2_MASKb1             (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1         0x04
+#define ATOM_S0_TV1_SVIDEOb1            0x08
+#define ATOM_S0_TV1_SCARTb1             0x40
+#define ATOM_S0_TV1_MASKb1              (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1                    0x10
+#define ATOM_S0_CV_DINb1                0x20
+#define ATOM_S0_CV_MASKb1               (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2                  0x01
+#define ATOM_S0_DFP2b2                  0x02
+#define ATOM_S0_LCD1b2                  0x04
+#define ATOM_S0_LCD2b2                  0x08
+#define ATOM_S0_DFP6b2                  0x10
+#define ATOM_S0_DFP3b2                  0x20
+#define ATOM_S0_DFP4b2                  0x40
+#define ATOM_S0_DFP5b2                  0x80
+
+
+#define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3   2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT              18
+
+// BIOS_1_SCRATCH Definition
+#define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
+
+//	BIOS_2_SCRATCH Definition
+#define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
+
+#define ATOM_S2_DEVICE_DPMS_STATE       0x00010000L
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE    0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE   0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE   0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_DEVICE_DPMS_STATEb2     0x01
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3     0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3   0x10
+#define ATOM_S2_TMDS_COHERENT_MODEb3    0x10          // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
+
+
+// BIOS_3_SCRATCH Definition
+#define ATOM_S3_CRT1_ACTIVE             0x00000001L
+#define ATOM_S3_LCD1_ACTIVE             0x00000002L
+#define ATOM_S3_TV1_ACTIVE              0x00000004L
+#define ATOM_S3_DFP1_ACTIVE             0x00000008L
+#define ATOM_S3_CRT2_ACTIVE             0x00000010L
+#define ATOM_S3_LCD2_ACTIVE             0x00000020L
+#define ATOM_S3_DFP6_ACTIVE             0x00000040L
+#define ATOM_S3_DFP2_ACTIVE             0x00000080L
+#define ATOM_S3_CV_ACTIVE               0x00000100L
+#define ATOM_S3_DFP3_ACTIVE							0x00000200L
+#define ATOM_S3_DFP4_ACTIVE							0x00000400L
+#define ATOM_S3_DFP5_ACTIVE							0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE        0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE        0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE         0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE				0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE				0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE				0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S3_CRT1_ACTIVEb0           0x01
+#define ATOM_S3_LCD1_ACTIVEb0           0x02
+#define ATOM_S3_TV1_ACTIVEb0            0x04
+#define ATOM_S3_DFP1_ACTIVEb0           0x08
+#define ATOM_S3_CRT2_ACTIVEb0           0x10
+#define ATOM_S3_LCD2_ACTIVEb0           0x20
+#define ATOM_S3_DFP6_ACTIVEb0           0x40
+#define ATOM_S3_DFP2_ACTIVEb0           0x80
+#define ATOM_S3_CV_ACTIVEb1             0x01
+#define ATOM_S3_DFP3_ACTIVEb1						0x02
+#define ATOM_S3_DFP4_ACTIVEb1						0x04
+#define ATOM_S3_DFP5_ACTIVEb1						0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2      0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2      0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2       0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3			0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3			0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3			0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
+
+// BIOS_4_SCRATCH Definition
+#define ATOM_S4_LCD1_PANEL_ID_MASK      0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT      8
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0	  0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1		  ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
+#define ATOM_S5_DOS_REQ_CRT1b0          0x01
+#define ATOM_S5_DOS_REQ_LCD1b0          0x02
+#define ATOM_S5_DOS_REQ_TV1b0           0x04
+#define ATOM_S5_DOS_REQ_DFP1b0          0x08
+#define ATOM_S5_DOS_REQ_CRT2b0          0x10
+#define ATOM_S5_DOS_REQ_LCD2b0          0x20
+#define ATOM_S5_DOS_REQ_DFP6b0          0x40
+#define ATOM_S5_DOS_REQ_DFP2b0          0x80
+#define ATOM_S5_DOS_REQ_CVb1            0x01
+#define ATOM_S5_DOS_REQ_DFP3b1					0x02
+#define ATOM_S5_DOS_REQ_DFP4b1					0x04
+#define ATOM_S5_DOS_REQ_DFP5b1					0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0        0x0FFF
+
+#define ATOM_S5_DOS_REQ_CRT1            0x0001
+#define ATOM_S5_DOS_REQ_LCD1            0x0002
+#define ATOM_S5_DOS_REQ_TV1             0x0004
+#define ATOM_S5_DOS_REQ_DFP1            0x0008
+#define ATOM_S5_DOS_REQ_CRT2            0x0010
+#define ATOM_S5_DOS_REQ_LCD2            0x0020
+#define ATOM_S5_DOS_REQ_DFP6            0x0040
+#define ATOM_S5_DOS_REQ_DFP2            0x0080
+#define ATOM_S5_DOS_REQ_CV              0x0100
+#define ATOM_S5_DOS_REQ_DFP3            0x0200
+#define ATOM_S5_DOS_REQ_DFP4            0x0400
+#define ATOM_S5_DOS_REQ_DFP5            0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1      (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                        (ATOM_S5_DOS_FORCE_CVb3<<8))
+
+// BIOS_6_SCRATCH Definition
+#define ATOM_S6_DEVICE_CHANGE           0x00000001L
+#define ATOM_S6_SCALER_CHANGE           0x00000002L
+#define ATOM_S6_LID_CHANGE              0x00000004L
+#define ATOM_S6_DOCKING_CHANGE          0x00000008L
+#define ATOM_S6_ACC_MODE                0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE        0x00000020L
+#define ATOM_S6_LID_STATE               0x00000040L
+#define ATOM_S6_DOCK_STATE              0x00000080L
+#define ATOM_S6_CRITICAL_STATE          0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL         0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO  0x00002000L //Aspect ratio expansion Request bit for LCD
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion
+
+#define ATOM_S6_ACC_REQ_CRT1            0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1            0x00020000L
+#define ATOM_S6_ACC_REQ_TV1             0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1            0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2            0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2            0x00200000L
+#define ATOM_S6_ACC_REQ_DFP6            0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2            0x00800000L
+#define ATOM_S6_ACC_REQ_CV              0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3						0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4						0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5						0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK                0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE    0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH    0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S6_DEVICE_CHANGEb0         0x01
+#define ATOM_S6_SCALER_CHANGEb0         0x02
+#define ATOM_S6_LID_CHANGEb0            0x04
+#define ATOM_S6_DOCKING_CHANGEb0        0x08
+#define ATOM_S6_ACC_MODEb0              0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0      0x20
+#define ATOM_S6_LID_STATEb0             0x40
+#define ATOM_S6_DOCK_STATEb0            0x80
+#define ATOM_S6_CRITICAL_STATEb1        0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02  
+#define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1        0x10    
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 
+
+#define ATOM_S6_ACC_REQ_CRT1b2          0x01
+#define ATOM_S6_ACC_REQ_LCD1b2          0x02
+#define ATOM_S6_ACC_REQ_TV1b2           0x04
+#define ATOM_S6_ACC_REQ_DFP1b2          0x08
+#define ATOM_S6_ACC_REQ_CRT2b2          0x10
+#define ATOM_S6_ACC_REQ_LCD2b2          0x20
+#define ATOM_S6_ACC_REQ_DFP6b2          0x40
+#define ATOM_S6_ACC_REQ_DFP2b2          0x80
+#define ATOM_S6_ACC_REQ_CVb3            0x01
+#define ATOM_S6_ACC_REQ_DFP3b3          0x02
+#define ATOM_S6_ACC_REQ_DFP4b3          0x04
+#define ATOM_S6_ACC_REQ_DFP5b3          0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3    0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3    0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT             0
+#define ATOM_S6_SCALER_CHANGE_SHIFT             1
+#define ATOM_S6_LID_CHANGE_SHIFT                2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT            3
+#define ATOM_S6_ACC_MODE_SHIFT                  4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT          5
+#define ATOM_S6_LID_STATE_SHIFT                 6
+#define ATOM_S6_DOCK_STATE_SHIFT                7
+#define ATOM_S6_CRITICAL_STATE_SHIFT            8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT         9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT      10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT     11
+#define ATOM_S6_REQ_SCALER_SHIFT                12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT         13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT      14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT          15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT  28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT  29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT     30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT     31
+
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
+#define ATOM_S7_DOS_MODE_TYPEb0             0x03
+#define ATOM_S7_DOS_MODE_VGAb0              0x00
+#define ATOM_S7_DOS_MODE_VESAb0             0x01
+#define ATOM_S7_DOS_MODE_EXTb0              0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
+
+// BIOS_8_SCRATCH Definition
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK       0x0000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK     0xFFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT      0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT       16
+
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK  0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK  
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK    0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   16
+#endif
+
+ 
+#define ATOM_FLAG_SET                         0x20
+#define ATOM_FLAG_CLEAR                       0
+#define CLEAR_ATOM_S6_ACC_MODE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE                 ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE			          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE  ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE            ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)  
+#define CLEAR_ATOM_S6_REQ_SCALER              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO         ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO       ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG           ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1                    ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )|  ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN           ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN         ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
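+
+// Illustrative sketch, not part of the original header: the SET_/CLEAR_ tokens
+// above pack three fields into one value: the scratch register selector (one of
+// the *_INFO_DEF indices) in bits [15:8], the bit position within that register
+// in bits [4:0], and ATOM_FLAG_SET/ATOM_FLAG_CLEAR in bit 5. Unpacking:
+static void atom_decode_flag_token(USHORT usToken, UCHAR *pucScratchIdx,
+                                   UCHAR *pucBitShift, UCHAR *pucIsSet)
+{
+  *pucScratchIdx = (UCHAR)(usToken >> 8);                /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
+  *pucBitShift   = (UCHAR)(usToken & 0x1F);              /* bit position, 0..31 */
+  *pucIsSet      = (usToken & ATOM_FLAG_SET) != 0;       /* set vs. clear request */
+}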
+
+/****************************************************************************/	
+//Portion II: Definitions only used in Driver
+/****************************************************************************/
+
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define	GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
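+
+// Illustrative sketch, not part of the original header: GetIndexIntoMasterTable
+// turns a member name of the master table struct into its index within an array
+// of USHORT offsets, which is what the macro's division by sizeof(USHORT)
+// implies. ATOM_MASTER_LIST_OF_DATA_TABLES is defined earlier in this header;
+// FirmwareInfo is assumed here only as an example member name.
+static USHORT atom_get_example_table_offset(const USHORT *pusMasterDataTable)
+{
+  return pusMasterDataTable[GetIndexIntoMasterTable(DATA, FirmwareInfo)];
+}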
+
+/****************************************************************************/	
+//Portion III: Definitions only used in VBIOS
+/****************************************************************************/
+#define ATOM_DAC_SRC					0x80
+#define ATOM_SRC_DAC1					0
+#define ATOM_SRC_DAC2					0x80
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG ulTargetMemoryClock; //In 10Khz unit
+  UCHAR   ucAction;					 //not defined yet
+  UCHAR   ucFbDiv_Hi;				 //Fbdiv Hi byte
+  UCHAR   ucFbDiv;					 //FB value
+  UCHAR   ucPostDiv;				 //Post div
+}MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
+
+
+#define	GPIO_PIN_WRITE													0x01			
+#define	GPIO_PIN_READ														0x00
+
+typedef struct  _GPIO_PIN_CONTROL_PARAMETERS
+{
+  UCHAR ucGPIO_ID;           //return value, read from GPIO pins
+  UCHAR ucGPIOBitShift;	     //defines which bit in ucGPIOBitVal needs to be updated 
+	UCHAR ucGPIOBitVal;		     //Set/Reset corresponding bit defined in ucGPIOBitMask
+  UCHAR ucAction;				     //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+  UCHAR ucScaler;            // ATOM_SCALER1, ATOM_SCALER2
+  UCHAR ucEnable;            // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+  UCHAR ucTVStandard;        // 
+  UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS; 
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS 
+
+//ucEnable:
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE               2
+#define SCALER_ENABLE_MULTITAP_MODE                 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+  ULONG  usHWIconHorzVertPosn;        // Hardware icon horizontal and vertical position
+  UCHAR  ucHWIconVertOffset;          // Hardware Icon Vertical offset
+  UCHAR  ucHWIconHorzOffset;          // Hardware Icon Horizontal offset
+  UCHAR  ucSelection;                 // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS  sEnableIcon;
+  ENABLE_CRTC_PARAMETERS                  sReserved;  
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2	
+  UCHAR  ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0. 
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image Width
+  USHORT usGraphPitch;
+  UCHAR  ucColorDepth;
+  UCHAR  ucPixelFormat;
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucModeType;
+  UCHAR  ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH             0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START        0x10
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;          
+  ENABLE_YUV_PS_ALLOCATION        sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+  USHORT  usMemoryStart;                //offset from memory base address, on an 8KB boundary
+  USHORT  usMemorySize;                 //size aligned to 8KB blocks
+}MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+  USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC                 
+  USHORT  usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+  union{
+    USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC                 
+    USHORT  usSurface; 
+  };
+  USHORT usY_Size;
+  USHORT usDispXStart;               
+  USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2; 
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3 
+{
+  UCHAR  ucLutId;
+  UCHAR  ucAction;
+  USHORT usLutStartIndex;
+  USHORT usLutLength;
+  USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL            1
+#define PALETTE_DATA_READ                 2
+#define PALETTE_DATA_WRITE                3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+  UCHAR  ucInterruptId;
+  UCHAR  ucServiceId;
+  UCHAR  ucStatus;
+  UCHAR  ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID                 1
+#define HDP2_INTERRUPT_ID                 2
+#define HDP3_INTERRUPT_ID                 3
+#define HDP4_INTERRUPT_ID                 4
+#define HDP5_INTERRUPT_ID                 5
+#define HDP6_INTERRUPT_ID                 6
+#define SW_INTERRUPT_ID                   11   
+
+// ucServiceId
+#define INTERRUPT_SERVICE_GEN_SW_INT      1
+#define INTERRUPT_SERVICE_GET_STATUS      2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER     1
+#define INTERRUPT_STATUS__HPD_HIGH        2
+
+typedef struct _INDIRECT_IO_ACCESS
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ              0x00
+#define INDIRECT_WRITE             0x80
+
+#define INDIRECT_IO_MM             0
+#define INDIRECT_IO_PLL            1
+#define INDIRECT_IO_MC             2
+#define INDIRECT_IO_PCIE           3
+#define INDIRECT_IO_PCIEP          4
+#define INDIRECT_IO_NBMISC         5
+
+#define INDIRECT_IO_PLL_READ       INDIRECT_IO_PLL   | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE      INDIRECT_IO_PLL   | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ        INDIRECT_IO_MC    | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE       INDIRECT_IO_MC    | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ      INDIRECT_IO_PCIE  | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE     INDIRECT_IO_PCIE  | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ     INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE    INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ    INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE   INDIRECT_IO_NBMISC | INDIRECT_WRITE
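+
+// Illustrative sketch, not part of the original header: each opcode above
+// encodes the target port in the low bits and the direction in bit 7
+// (INDIRECT_WRITE), so both halves can be recovered from a single byte:
+static UCHAR atom_indirect_io_is_write(UCHAR ucOpcode)
+{
+  return (ucOpcode & INDIRECT_WRITE) != 0;
+}
+
+static UCHAR atom_indirect_io_port(UCHAR ucOpcode)
+{
+  return ucOpcode & 0x7F;   /* e.g. INDIRECT_IO_MC, INDIRECT_IO_PCIE */
+}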
+
+typedef struct _ATOM_OEM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+   UCHAR	ucVMode_Num;			  //Video mode number
+   UCHAR	ucTV_Mode_Num;			//Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+   USHORT	usTV_Mode_LUT_Offset;	// Pointer to standard to internal number conversion table
+   USHORT	usTV_FIFO_Offset;		  // Pointer to FIFO entry table
+   USHORT	usNTSC_Tbl_Offset;		// Pointer to SDTV_Mode_NTSC table
+   USHORT	usPAL_Tbl_Offset;		  // Pointer to SDTV_Mode_PAL table 
+   USHORT	usCV_Tbl_Offset;		  // Pointer to the CV mode table 
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+   USHORT	ucFilter0_Offset;		//Pointer to filter format 0 coefficients
+   USHORT	usFilter1_Offset;		//Pointer to filter format 1 coefficients
+   UCHAR	ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT 				 aModeTimings[16];      // 16 is not the real array size, just an initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{ 
+  USHORT    usSTD_HDisp;
+  USHORT    usSTD_VDisp;
+  USHORT    usSTD_RefreshRate;
+  USHORT    usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+  USHORT  usVESA_ModeNumber;
+  USHORT  usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{ 
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+	UCHAR												ucMemoryType;
+	UCHAR												ucMemoryVendor;
+	UCHAR												ucAdjMCId;
+	UCHAR												ucDynClkId;
+	ULONG												ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
+#if ATOM_BIG_ENDIAN
+	ULONG												ucMemBlkId:8;
+	ULONG												ulMemClockRange:24;
+#else
+	ULONG												ulMemClockRange:24;
+	ULONG												ucMemBlkId:8;
+#endif
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
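+
+// Illustrative sketch, not part of the original header: the union above lets SW
+// treat the packed {memory block id, clock range} pair as one ULONG; splitting
+// a raw dword back into its fields is a plain bitfield read.
+static void atom_split_memory_setting_id(ULONG ulRawId, UCHAR *pucMemBlkId,
+                                         ULONG *pulMemClockRange)
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS uAccess;
+
+  uAccess.ulAccess  = ulRawId;
+  *pucMemBlkId      = (UCHAR)uAccess.slAccess.ucMemBlkId;   /* 8-bit block id */
+  *pulMemClockRange = uAccess.slAccess.ulMemClockRange;     /* 24-bit clock range */
+}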
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS			ulMemoryID;
+	ULONG															        aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+	 USHORT											usRegIndex;                                     // MC register index
+	 UCHAR											ucPreRegDataLength;                             // offset in ATOM_INIT_REG_BLOCK.asRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+	USHORT													usRegIndexTblSize;													//size of asRegIndexBuf
+	USHORT													usRegDataBlkSize;														//size of ATOM_MEMORY_SETTING_DATA_BLOCK
+	ATOM_INIT_REG_INDEX_FORMAT			asRegIndexBuf[1];
+	ATOM_MEMORY_SETTING_DATA_BLOCK	asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK  0x0ffff
+#define END_OF_REG_DATA_BLOCK   0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80               //Not used in BIOS
+#define	CLOCK_RANGE_HIGHEST			0x00ffffff
+
+#define VALUE_DWORD             sizeof(ULONG)
+#define VALUE_SAME_AS_ABOVE     0
+#define VALUE_MASK_DWORD        0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN	    (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END		    (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE	    (INDEX_ACCESS_RANGE_END + 1)
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
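+
+// Illustrative sketch, not part of the original header: asRegIndexBuf is a
+// variable-length list terminated by END_OF_REG_INDEX_BLOCK, so its real entry
+// count has to be discovered by scanning (bounded by usRegIndexTblSize).
+static USHORT atom_count_reg_index_entries(const ATOM_INIT_REG_BLOCK *pBlock)
+{
+  USHORT usCount = 0;
+  USHORT usMax = (USHORT)(pBlock->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT));
+
+  while (usCount < usMax &&
+         pBlock->asRegIndexBuf[usCount].usRegIndex != END_OF_REG_INDEX_BLOCK)
+    usCount++;
+  return usCount;
+}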
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  USHORT											usAdjustARB_SEQDataOffset;
+  USHORT											usMCInitMemTypeTblOffset;
+  USHORT											usMCInitCommonTblOffset;
+  USHORT											usMCInitPowerDownTblOffset;
+	ULONG												ulARB_SEQDataBuf[32];
+	ATOM_INIT_REG_BLOCK					asMCInitMemType;
+	ATOM_INIT_REG_BLOCK					asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
+
+#define _4Mx16              0x2
+#define _4Mx32              0x3
+#define _8Mx16              0x12
+#define _8Mx32              0x13
+#define _16Mx16             0x22
+#define _16Mx32             0x23
+#define _32Mx16             0x32
+#define _32Mx32             0x33
+#define _64Mx8              0x41
+#define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _256Mx8             0x61
+#define _256Mx16            0x62
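+
+// Illustrative sketch, not part of the original header: the density codes above
+// appear to follow a nibble pattern, with device depth = 4M << high nibble and
+// device width = 4 << low nibble (so _64Mx16 == 0x42 decodes to 64M x16). The
+// pattern is inferred from the defines, not stated by the original header.
+static void atom_decode_density(UCHAR ucDensity, ULONG *pulDepthInM, UCHAR *pucWidth)
+{
+  *pulDepthInM = 4UL << (ucDensity >> 4);             /* 0x0->4M ... 0x6->256M */
+  *pucWidth    = (UCHAR)(4 << (ucDensity & 0xF));     /* 0x1->x8, 0x2->x16, 0x3->x32 */
+}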
+
+#define SAMSUNG             0x1
+#define INFINEON            0x2
+#define ELPIDA              0x3
+#define ETRON               0x4
+#define NANYA               0x5
+#define HYNIX               0x6
+#define MOSEL               0x7
+#define WINBOND             0x8
+#define ESMT                0x9
+#define MICRON              0xF
+
+#define QIMONDA             INFINEON
+#define PROMOS              MOSEL
+#define KRETON              INFINEON
+#define ELIXIR              NANYA
+
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
+
+#define UCODE_ROM_START_ADDRESS		0x1b800
+#define	UCODE_SIGNATURE			0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+  ULONG  ulSignature;
+  UCHAR  ucRevision;
+  UCHAR  ucChecksum;
+  UCHAR  ucReserved1;
+  UCHAR  ucReserved2;
+  USHORT usParametersLength;
+  USHORT usUCodeLength;
+  USHORT usReserved1;
+  USHORT usReserved2;
+} MCuCodeHeader;
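+
+// Illustrative sketch, not part of the original header: a loader can check that
+// the upper-ROM region really carries MC uCode by testing the header signature
+// at UCODE_ROM_START_ADDRESS. pucRomBase is a hypothetical pointer to the start
+// of the ROM image; the cast assumes a byte-addressable, little-endian view.
+static int atom_mc_ucode_present(const UCHAR *pucRomBase)
+{
+  const MCuCodeHeader *pHdr =
+      (const MCuCodeHeader *)(pucRomBase + UCODE_ROM_START_ADDRESS);
+
+  return pHdr->ulSignature == UCODE_SIGNATURE;   /* 'MCuC' */
+}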
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE	16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK	0xF
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+  ULONG                      ulReserved;
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor 
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of the channel combination; [7:4]=Channel bit width, in powers of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+  ULONG                      ulReserved;
+  ULONG                      ulFlags;     			// To enable/disable functionalities based on memory type
+  ULONG                      ulEngineClock;     // Override of default engine clock for particular memory type
+  ULONG                      ulMemoryClock;     // Override of default memory clock for particular memory type
+  USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of the channel combination; [7:4]=Channel bit width, in powers of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucRefreshRateFactor;
+  UCHAR                      ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+  union{
+	  USHORT										 usMRS;							// mode register						
+    USHORT                     usDDR3_MR0;
+  };
+  union{
+	  USHORT										 usEMRS;						// extended mode register
+    USHORT                     usDDR3_MR1;
+  };
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+  union 
+  {
+    struct {
+	    UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+	    UCHAR											 ucReserved;						
+    };
+    USHORT                   usDDR3_MR2;
+  };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR4lo;					// 
+	UCHAR											 ucMR4hi;					// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+	UCHAR											 ucReserved;	
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef	struct _ATOM_MEMORY_FORMAT
+{
+	ULONG											 ulDllDisClock;			// memory DLL will be disabled when the target memory clock is below this clock
+  union{
+    USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_Reserved;   // Not used for DDR3 memory
+  };
+  union{
+    USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_MR3;        // Used for DDR3 memory
+  };
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+	UCHAR											 ucBurstSize;				// burst size: 0 = burst size 4, 1 = burst size 8
+  UCHAR                      ucDllDisBit;				// position of the DLL Enable/Disable bit in EMRS (Extended Mode Register)
+  UCHAR                      ucRefreshRateFactor;	// memory refresh rate in units of ms	
+	UCHAR											 ucDensity;					// _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR											 ucPreamble;				//[7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR											 ucMemAttrib;				// Memory device attributes, like RDBI/WDBI etc.
+	ATOM_MEMORY_TIMING_FORMAT	 asMemTiming[5];		//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+	ULONG											 ulChannelMapCfg;		// board dependent parameter: Channel combination
+	USHORT										 usSize;						// size of ATOM_VRAM_MODULE_V3
+  USHORT                     usDefaultMVDDQ;		// board dependent parameter:Default Memory Core Voltage
+  USHORT                     usDefaultMVDDC;		// board dependent parameter:Default Memory IO Voltage
+	UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucChannelNum;      // board dependent parameter:Number of channel;
+	UCHAR											 ucChannelSize;			// board dependent parameter: 32bit or 64bit	
+	UCHAR											 ucVREFI;						// board dependent parameter: EXT or INT, +160mV to -140mV
+	UCHAR											 ucNPL_RT;					// board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+	UCHAR											 ucFlag;						// To enable/disable functionalities based on memory type
+	ATOM_MEMORY_FORMAT				 asMemory;					// describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK															0x0f
+#define BATTERY_ODT_MASK												0xc0
+
+#define ATOM_VRAM_MODULE		 ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  union{
+    USHORT	usEMRS2Value;                   // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_Reserved;
+  };
+  union{
+    USHORT	usEMRS3Value;                   // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_MR3;                     // Used for DDR3 memory
+  };  
+  UCHAR   ucMemoryVenderID;  		            // Predefined; if not predefined, the vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR   ucReserved2[2];
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5];// Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK         0x4
+#define VRAM_MODULE_V4_MISC_BL8             0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10
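+
+// Illustrative sketch, not part of the vendor header: decoding the ucMisc
+// bits of ATOM_VRAM_MODULE_V4 with the masks above. The helper names are
+// ours; only the bit layout comes from the definitions.
+static __inline int atom_v4_is_dual_rank(const ATOM_VRAM_MODULE_V4 *m)
+{
+	return ((m->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) ==
+	    VRAM_MODULE_V4_MISC_DUAL_RANK);
+}
+
+static __inline int atom_v4_burst_length(const ATOM_VRAM_MODULE_V4 *m)
+{
+	return ((m->ucMisc & VRAM_MODULE_V4_MISC_BL_MASK) ? 8 : 4);	// bit2: 0 -> BL4, 1 -> BL8
+}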
+
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V5, make it easy for VBIOS to look for the next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;  		            // Predefined; if not predefined, the vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;			                // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;		   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V1  asMemTiming[5];// Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V6, make it easy for VBIOS to look for the next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;  		            // Predefined; if not predefined, the vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;			                // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;		   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];// Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG	  ulChannelMapCfg;	                // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usEnableChannels;                 // bit vector which indicate which channels are enabled
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR	  ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+  UCHAR	  ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR	  ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+  UCHAR	  ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  USHORT  usSEQSettingOffset;
+  UCHAR   ucReserved;
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;                      // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  char    strMemPNString[20];               // part number; the string ends with '0'.
+}ATOM_VRAM_MODULE_V7;
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE           aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+	USHORT										 usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+	USHORT										 usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+	USHORT										 usRerseved;
+	UCHAR           	         aVID_PinsShift[9];															 // 8 bit strap maximum+terminator
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE		       aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+	ATOM_INIT_REG_BLOCK				 asMemPatch;																		 // for allocation
+																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
+}ATOM_VRAM_INFO_V3;
+
+#define	ATOM_VRAM_INFO_LAST	     ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT										 usRerseved;
+  UCHAR           	         ucMemDQ7_0ByteRemap;													   // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+  ULONG                      ulMemDQ7_0BitRemap;                             // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+  UCHAR                      ucReservde[4]; 
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE_V4		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+	ATOM_INIT_REG_BLOCK				 asMemPatch;																		 // for allocation
+																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
+}ATOM_VRAM_INFO_V4;
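+
+// Illustrative sketch, not vendor code: ulMemDQ7_0BitRemap packs one 3-bit
+// field per DQ line (DQ0 in bits [2:0] ... DQ7 in bits [23:21]); extracting
+// the field for one line is a shift and mask. The helper name is ours.
+static __inline unsigned int atom_v4_dq_bit_remap(ULONG ulMemDQ7_0BitRemap,
+    unsigned int dq /* 0..7 */)
+{
+	return ((unsigned int)(ulMemDQ7_0BitRemap >> (3 * dq)) & 0x7);
+}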
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usPerBytePresetOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usReserved[3];
+  UCHAR                      ucNumOfVRAMModule;                              // indicates the number of VRAM modules
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates the ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved; 
+  ATOM_VRAM_MODULE_V7		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
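+
+// Illustrative sketch, not vendor code: usModuleSize exists so a parser can
+// step through the variable-sized module list without hardcoding the layout.
+// A minimal walk, assuming a well-formed table already mapped in memory:
+static __inline const ATOM_VRAM_MODULE_V7 *
+atom_next_vram_module_v7(const ATOM_VRAM_MODULE_V7 *m)
+{
+	return ((const ATOM_VRAM_MODULE_V7 *)((const UCHAR *)m + m->usModuleSize));
+}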
+
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR           	         aVID_PinsShift[9];   //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+	ATOM_COMMON_TABLE_HEADER   sHeader;
+	UCHAR											 ucTrainingLoop;
+	UCHAR											 ucReserved[3];
+	ATOM_INIT_REG_BLOCK				 asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+  UCHAR    ucControl;
+  UCHAR    ucData; 
+  UCHAR    ucSatus; 
+  UCHAR    ucTemp; 
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{                               
+  USHORT   GPIO_Info;
+  UCHAR    ucAct; 
+  UCHAR    ucData; 
+ } SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET       0
+#define SW_I2C_IO_GET         1
+#define SW_I2C_IO_DRIVE       2
+#define SW_I2C_IO_SET         3
+#define SW_I2C_IO_START       4
+
+#define SW_I2C_IO_CLOCK       0
+#define SW_I2C_IO_DATA        0x80
+
+#define SW_I2C_IO_ZERO        0
+#define SW_I2C_IO_ONE         0x100
+
+#define SW_I2C_CNTL_READ      0
+#define SW_I2C_CNTL_WRITE     1
+#define SW_I2C_CNTL_START     2
+#define SW_I2C_CNTL_STOP      3
+#define SW_I2C_CNTL_OPEN      4
+#define SW_I2C_CNTL_CLOSE     5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+//==============================VESA definition Portion===============================
+#define VESA_OEM_PRODUCT_REV			            "01.00"
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT	     0xBB	//refer to VBE spec p.32, no TTY support
+#define VESA_MODE_WIN_ATTRIBUTE						     7
+#define VESA_WIN_SIZE											     64
+
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+	USHORT	Offset16;			
+	USHORT	Segment16;				
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION
+{
+	PTR_32_BIT_STRUCTURE	SegmentOffset;
+	ULONG					        Ptr32_Bit;
+} PTR_32_BIT_UNION;
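+
+// Illustrative sketch, not vendor code: a VBE far pointer stored as
+// segment:offset resolves to a real-mode linear address as segment*16+offset.
+// The helper name is ours.
+static __inline ULONG vbe_far_ptr_to_linear(PTR_32_BIT_UNION p)
+{
+	return (((ULONG)p.SegmentOffset.Segment16 << 4) +
+	    p.SegmentOffset.Offset16);
+}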
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+	UCHAR				      VbeSignature[4];
+	USHORT				    VbeVersion;
+	PTR_32_BIT_UNION	OemStringPtr;
+	UCHAR				      Capabilities[4];
+	PTR_32_BIT_UNION	VideoModePtr;
+	USHORT				    TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+	VBE_1_2_INFO_BLOCK_UPDATABLE	CommonBlock;
+	USHORT							    OemSoftRev;
+	PTR_32_BIT_UNION				OemVendorNamePtr;
+	PTR_32_BIT_UNION				OemProductNamePtr;
+	PTR_32_BIT_UNION				OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION
+{
+	VBE_2_0_INFO_BLOCK_UPDATABLE	VBE_2_0_InfoBlock;
+	VBE_1_2_INFO_BLOCK_UPDATABLE	VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK
+{
+	VBE_VERSION_UNION			UpdatableVBE_Info;
+	UCHAR						      Reserved[222];
+	UCHAR						      OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO
+{
+  USHORT	HSize;
+	USHORT	VSize;
+	USHORT	FPType;
+	UCHAR		RedBPP;
+	UCHAR		GreenBPP;
+	UCHAR		BlueBPP;
+	UCHAR		ReservedBPP;
+	ULONG		RsvdOffScrnMemSize;
+	ULONG		RsvdOffScrnMEmPtr;
+	UCHAR		Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+  USHORT    ModeAttributes;  //			dw	?	; mode attributes
+	UCHAR     WinAAttributes;  //			db	?	; window A attributes
+	UCHAR     WinBAttributes;  //			db	?	; window B attributes
+	USHORT    WinGranularity;  //			dw	?	; window granularity
+	USHORT    WinSize;         //			dw	?	; window size
+	USHORT    WinASegment;     //			dw	?	; window A start segment
+	USHORT    WinBSegment;     //			dw	?	; window B start segment
+	ULONG     WinFuncPtr;      //			dd	?	; real mode pointer to window function
+	USHORT    BytesPerScanLine;//			dw	?	; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+  USHORT    XResolution;      //			dw	?	; horizontal resolution in pixels or characters
+	USHORT    YResolution;      //			dw	?	; vertical resolution in pixels or characters
+	UCHAR     XCharSize;        //			db	?	; character cell width in pixels
+	UCHAR     YCharSize;        //			db	?	; character cell height in pixels
+	UCHAR     NumberOfPlanes;   //			db	?	; number of memory planes
+	UCHAR     BitsPerPixel;     //			db	?	; bits per pixel
+	UCHAR     NumberOfBanks;    //			db	?	; number of banks
+	UCHAR     MemoryModel;      //			db	?	; memory model type
+	UCHAR     BankSize;         //			db	?	; bank size in KB
+	UCHAR     NumberOfImagePages;//		  db	?	; number of images
+	UCHAR     ReservedForPageFunction;//db	1	; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+	UCHAR			RedMaskSize;        //		db	?	; size of direct color red mask in bits
+	UCHAR			RedFieldPosition;   //		db	?	; bit position of lsb of red mask
+	UCHAR			GreenMaskSize;      //		db	?	; size of direct color green mask in bits
+	UCHAR			GreenFieldPosition; //		db	?	; bit position of lsb of green mask
+	UCHAR			BlueMaskSize;       //		db	?	; size of direct color blue mask in bits
+	UCHAR			BlueFieldPosition;  //		db	?	; bit position of lsb of blue mask
+	UCHAR			RsvdMaskSize;       //		db	?	; size of direct color reserved mask in bits
+	UCHAR			RsvdFieldPosition;  //		db	?	; bit position of lsb of reserved mask
+	UCHAR			DirectColorModeInfo;//		db	?	; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+	ULONG			PhysBasePtr;        //		dd	?	; physical address for flat memory frame buffer
+	ULONG			Reserved_1;         //		dd	0	; reserved - always set to 0
+	USHORT		Reserved_2;         //	  dw	0	; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+	USHORT		LinBytesPerScanLine;  //	dw	?	; bytes per scan line for linear modes
+	UCHAR			BnkNumberOfImagePages;//	db	?	; number of images for banked modes
+	UCHAR			LinNumberOfImagPages; //	db	?	; number of images for linear modes
+	UCHAR			LinRedMaskSize;       //	db	?	; size of direct color red mask(linear modes)
+	UCHAR			LinRedFieldPosition;  //	db	?	; bit position of lsb of red mask(linear modes)
+	UCHAR			LinGreenMaskSize;     //	db	?	; size of direct color green mask(linear modes)
+	UCHAR			LinGreenFieldPosition;//	db	?	; bit position of lsb of green mask(linear modes)
+	UCHAR			LinBlueMaskSize;      //	db	?	; size of direct color blue mask(linear modes)
+	UCHAR			LinBlueFieldPosition; //	db	?	; bit position of lsb of blue mask(linear modes)
+	UCHAR			LinRsvdMaskSize;      //	db	?	; size of direct color reserved mask(linear modes)
+	UCHAR			LinRsvdFieldPosition; //	db	?	; bit position of lsb of reserved mask(linear modes)
+	ULONG			MaxPixelClock;        //	dd	?	; maximum pixel clock(in Hz) for graphics mode
+	UCHAR			Reserved;             //	db	190 dup (0)
+} VESA_MODE_INFO_BLOCK;
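+
+// Illustrative sketch, not vendor code: for a VBE 3.0 linear mode the minimum
+// frame-buffer footprint follows directly from the block above (a banked mode
+// would use BytesPerScanLine instead). The helper name is ours.
+static __inline ULONG vesa_linear_fb_size(const VESA_MODE_INFO_BLOCK *mi)
+{
+	return ((ULONG)mi->LinBytesPerScanLine * mi->YResolution);
+}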
+
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0	        // ATI Extended Function code
+#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B   
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD              0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET           0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH        0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A 
+#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000          // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100          // Sub function 81
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F 
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300          // Sub function 03  
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700          // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400          // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300          // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500          // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400          // Notify caller that ADC is supported
+     
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10          // Set DPMS 
+#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001          // BL: Sub function 01 
+#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002          // BL: Sub function 02 
+#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000          // BH Parameter for DPMS ON.  
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100          // BH Parameter for DPMS STANDBY  
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200          // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400          // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800          // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
+
+#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL
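+
+// Illustrative sketch, not vendor code: splitting a BIOS register result with
+// the masks above. The helper names are ours.
+static __inline UCHAR atom_bios_reg_high(ULONG ulReg)
+{
+	return ((UCHAR)((ulReg & ATOM_BIOS_REG_HIGH_MASK) >> 8));
+}
+
+static __inline UCHAR atom_bios_reg_low(ULONG ulReg)
+{
+	return ((UCHAR)(ulReg & ATOM_BIOS_REG_LOW_MASK));
+}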
+
+// structure used for VBIOS only
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
+	USHORT usTransmitterObjId;
+	USHORT usSupportDevice;
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 //available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    //available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
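+
+// Illustrative sketch, not vendor code: the encoder object ID lives in a
+// split mask (0xc4 covers bits 7, 6 and 2), so selection is by comparing the
+// masked value against the ENCODER_A..F codes rather than by shifting.
+static __inline int asic_transmitter_encoder_is(const ASIC_TRANSMITTER_INFO *t,
+    UCHAR ucEncoderCode /* one of the ENCODER_A..F values above */)
+{
+	return ((t->ucConfig & ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK) ==
+	    ucEncoderCode);
+}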
+
+typedef struct _ASIC_ENCODER_INFO
+{
+	UCHAR ucEncoderID;
+	UCHAR ucEncoderConfig;
+  USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
+
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+  UCHAR ucPpllId; 
+  UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE            0x01
+#define CLOCK_SOURCE_DP_MODE              0x02
+#define CLOCK_SOURCE_NONE_DP_MODE         0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+	USHORT usTransmitterObjId;
+	USHORT usDispClkIdOffset;    // point to clock source id list supported by Encoder Object
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 // available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    // available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+  USHORT usReserved;
+  UCHAR  ucDCERevision;   
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;                          // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucReserved[3];
+	ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucHPD_ID;                                       //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION			PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+	USHORT ucLinkClock;
+	union 
+	{
+	UCHAR ucConfig;				// for DP training command
+	UCHAR ucI2cId;				// use for GET_SINK_TYPE command
+	};
+	UCHAR ucAction;
+	UCHAR ucStatus;
+	UCHAR ucLaneNum;
+	UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE							0x01
+/* obsolete */
+#define ATOM_DP_ACTION_TRAINING_START							0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE					0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL				0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP					0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP					0x06
+#define ATOM_DP_ACTION_BLANKING                   0x07
+
+// ucConfig
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK						0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER								0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER								0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER						0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK							0x04
+#define ATOM_DP_CONFIG_LINK_A											0x00
+#define ATOM_DP_CONFIG_LINK_B											0x04
+/* /obsolete */
+#define DP_ENCODER_SERVICE_PS_ALLOCATION				WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
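+
+// Illustrative sketch, not vendor code: which union member of
+// DP_ENCODER_SERVICE_PARAMETERS is meaningful depends on ucAction;
+// per the comments above, GET_SINK_TYPE uses ucI2cId while the obsolete
+// training actions use ucConfig. A minimal request setup under that
+// assumption (the helper name is ours):
+static __inline void atom_dp_init_get_sink_type(DP_ENCODER_SERVICE_PARAMETERS *p,
+    UCHAR ucI2cId)
+{
+	p->usLinkClock = 0;
+	p->ucI2cId = ucI2cId;
+	p->ucAction = ATOM_DP_ACTION_GET_SINK_TYPE;
+	p->ucStatus = 0;
+	p->ucLaneNum = 0;
+}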
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+	USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and output parameters.
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+	UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE							0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION			    0x02
+
+
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR				ATOM_DP_TRAINING_TBL_ADDR		
+#define DPCD_SET_SS_CNTL_TBL_ADDR													(ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 24 )
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define	DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 80) 
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR									(ATOM_DP_TRAINING_TBL_ADDR + 84)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+	UCHAR   ucI2CSpeed;
+ 	union
+	{
+   UCHAR ucRegIndex;
+   UCHAR ucStatus;
+	};
+	USHORT  lpI2CDataOut;
+  UCHAR   ucFlag;               
+  UCHAR   ucTransBytes;
+  UCHAR   ucSlaveAddr;
+  UCHAR   ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+//ucFlag
+#define HW_I2C_WRITE        1
+#define HW_I2C_READ         0
+#define I2C_2BYTE_ADDR      0x02
+
+/****************************************************************************/	
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/	
+typedef struct  _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucCmd;                //  Input: To tell which action to take
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; 
+
+typedef struct  _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucReturnCode;        // Output: Return value based on the action taken
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define  ATOM_GET_SDI_SUPPORT              0xF0
+
+// Return code 
+#define  ATOM_UNKNOWN_CMD                   0
+#define  ATOM_FEATURE_NOT_SUPPORTED         1
+#define  ATOM_FEATURE_SUPPORTED             2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+	ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1        sInput_Output;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS         sReserved; 
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
+
+/****************************************************************************/	
+
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+   UCHAR ucHWBlkInst;                // HW block instance, 0, 1, 2, ...
+   UCHAR ucReserved[3]; 
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK       0x07
+#define HWBLKINST_HWBLK_MASK          0xF0
+#define HWBLKINST_HWBLK_SHIFT         0x04
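+
+// Illustrative sketch, not vendor code: ucHWBlkInst packs the block selector
+// (SELECT_* below) in the high nibble and the instance in the low bits. The
+// helper name is ours.
+static __inline UCHAR atom_hwblk_inst(UCHAR ucBlock, UCHAR ucInstance)
+{
+	return ((UCHAR)(((ucBlock << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
+	    (ucInstance & HWBLKINST_INSTANCE_MASK)));
+}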
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE            0
+#define SELECT_DISP_PLL               1
+#define SELECT_DCIO_UNIPHY_LINK0      2
+#define SELECT_DCIO_UNIPHY_LINK1      3
+#define SELECT_DCIO_IMPCAL            4
+#define SELECT_DCIO_DIG               6
+#define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
+
+// DIGTransmitterInfoTable structure used to program UNIPHY settings 
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{  
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock 
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with non-DP mode analog settings' register info
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with the non-DP mode analog setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info 
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG  ulCondition;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+/****************************************************************************/	
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
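+
+// Illustrative sketch, not vendor code: isolating the memory type field of an
+// MC_MISC0 scratch value for comparison against the codes above.
+static __inline ULONG mc_misc0_memory_type(ULONG ulMcMisc0)
+{
+	return (ulMcMisc0 & MC_MISC0__MEMORY_TYPE_MASK);
+}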
+
+/****************************************************************************/	
+//Portion VI: Definitions that are obsolete
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT                   usMaxFrequency;      // in 10kHz unit
+  USHORT                   usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct  _COMPASSIONATE_DATA           
+{
+  ATOM_COMMON_TABLE_HEADER sHeader; 
+
+  //==============================  DAC1 portion
+  UCHAR   ucDAC1_BG_Adjustment;
+  UCHAR   ucDAC1_DAC_Adjustment;
+  USHORT  usDAC1_FORCE_Data;
+  //==============================  DAC2 portion
+  UCHAR   ucDAC2_CRT2_BG_Adjustment;
+  UCHAR   ucDAC2_CRT2_DAC_Adjustment;
+  USHORT  usDAC2_CRT2_FORCE_Data;
+  USHORT  usDAC2_CRT2_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CRT2_MUX_RegisterInfo;     //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_NTSC_BG_Adjustment;
+  UCHAR   ucDAC2_NTSC_DAC_Adjustment;
+  USHORT  usDAC2_TV1_FORCE_Data;
+  USHORT  usDAC2_TV1_MUX_RegisterIndex;
+  UCHAR   ucDAC2_TV1_MUX_RegisterInfo;      //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_CV_BG_Adjustment;
+  UCHAR   ucDAC2_CV_DAC_Adjustment;
+  USHORT  usDAC2_CV_FORCE_Data;
+  USHORT  usDAC2_CV_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CV_MUX_RegisterInfo;       //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_PAL_BG_Adjustment;
+  UCHAR   ucDAC2_PAL_DAC_Adjustment;
+  USHORT  usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+//  ucConnectInfo:
+//    [7:4] - connector type
+//      = 1   - VGA connector   
+//      = 2   - DVI-I
+//      = 3   - DVI-D
+//      = 4   - DVI-A
+//      = 5   - SVIDEO
+//      = 6   - COMPOSITE
+//      = 7   - LVDS
+//      = 8   - DIGITAL LINK
+//      = 9   - SCART
+//      = 0xA - HDMI_type A
+//      = 0xB - HDMI_type B
+//      = 0xE - Special case1 (DVI+DIN)
+//      Others=TBD
+//    [3:0] - DAC Associated
+//      = 0   - no DAC
+//      = 1   - DACA
+//      = 2   - DACB
+//      = 3   - External DAC
+//      Others=TBD
+//    
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfConnectorType:4;
+  UCHAR   bfAssociatedDAC:4;
+#else
+  UCHAR   bfAssociatedDAC:4;
+  UCHAR   bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+  ATOM_CONNECTOR_INFO sbfAccess;
+  UCHAR               ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
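+
+// Illustrative sketch, not vendor code: the #if above keeps the bitfield
+// order consistent across endiannesses, so the packed fields can be read the
+// same way everywhere; the union also exposes the raw byte as ucAccess. The
+// helper name is ours.
+static __inline UCHAR atom_connector_type(ATOM_CONNECTOR_INFO_ACCESS info)
+{
+	return (info.sbfAccess.bfConnectorType);	// [7:4] of ucAccess
+}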
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C   asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED       0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+  UCHAR   ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+   USHORT usFrequency;
+   UCHAR  ucPLL_ChargePump;				                // PLL charge-pump gain control
+   UCHAR  ucPLL_DutyCycle;				                // PLL duty cycle control
+   UCHAR  ucPLL_VCO_Gain;				                  // PLL VCO gain control
+   UCHAR  ucPLL_VoltageSwing;			                // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;  
+
+
+#define ATOM_MAX_MISC_INFO       4
+
+typedef struct _ATOM_TMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT							usMaxFrequency;             // in 10kHz units
+  ATOM_MISC_CONTROL_INFO				asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+  UCHAR ucTVStandard;     //Same as TV standards defined above, 
+  UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+  UCHAR ucAttribute;      //Same as other digital encoder attributes defined above
+  UCHAR ucPadding[1];		
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+  ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock; 
+  USHORT usEncoderID; 
+  UCHAR  ucDeviceType;												//Use ATOM_DEVICE_xxx1_Index to indicate device type only.	
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  ATOM_ENCODER_ATTRIBUTE usDevAttr;     		
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{                               
+  DVO_ENCODER_CONTROL_PARAMETERS    sDVOEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+
+#define ATOM_XTMDS_ASIC_SI164_ID        1
+#define ATOM_XTMDS_ASIC_SI178_ID        2
+#define ATOM_XTMDS_ASIC_TFP513_ID       3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
+#define ATOM_XTMDS_MVPU_FPGA            0x00000004
+
+                           
+typedef struct _ATOM_XTMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  USHORT                     usSingleLinkMaxFrequency; 
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;           // The ID of the I2C line used to control the external chip
+  UCHAR                      ucXtransimitterID;          
+  UCHAR                      ucSupportedLink;    // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+  UCHAR                      ucSequnceAlterID;   // Even with the same external TMDS ASIC, it's possible that the programming sequence alters
+                                                 // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+  UCHAR                      ucMasterAddress;    // Address to control Master xTMDS Chip
+  UCHAR                      ucSlaveAddress;     // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{  
+  UCHAR ucEnable;                     // ATOM_ENABLE=On or ATOM_DISABLE=Off
+  UCHAR ucDevice;                     // ATOM_DEVICE_DFP1_INDEX....
+  UCHAR ucPadding[2];             
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                     0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                  0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                  0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT            0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH        0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN             0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN          0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN          0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                 0x00000080L  //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program
+ 
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN      0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN         0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN              0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE     0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE            0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE             0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                 0x00010000L 
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE               0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE              0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L  //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20 
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2      0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4      0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN            0x02000000L  //When set, Dynamic 
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN        0x04000000L  //When set, Dynamic
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN              0x08000000L  //When set, this mode is for accelerated 3D
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L  //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) 
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                0x80000000L
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE            0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT          0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN           0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L  //If this bit is set in multi-pp mode, the driver will pick the one with the lowest power consumption.
+                                                                      //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L 
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulReserved1;                // must set to 0
+  ULONG     ulReserved2;                // must set to 0
+  USHORT    usEngineClock;
+  USHORT    usMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO_V2
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct  _ATOM_POWERMODE_INFO_V3
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to Core (VDDC) voltage table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+  UCHAR     ucVDDCI_VoltageDropIndex;   // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN            0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE         0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63      0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032   0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030   0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649   0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64      0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375    0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512   0x07	// Andigilog
+
+
+typedef struct  _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct  _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+  
+typedef struct  _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
+    UCHAR ucI2cLine;        // as interpreted by DAL I2C
+    UCHAR ucI2cAddress;
+    UCHAR ucFanParameters;  // Fan Control Parameters.
+    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
+    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
+    UCHAR ucReserved;       // ----
+    UCHAR ucFlags;          // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64      5
+#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
+#define ATOM_PP_THERMALCONTROLLER_RV770     8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
+#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
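+
+// Illustrative sketch, not vendor code: because ucClockStateIndices is
+// variable-sized, state entries are indexed by the per-table entry size
+// (recorded later in ATOM_PPLIB_POWERPLAYTABLE.ucStateEntrySize), not by
+// sizeof(ATOM_PPLIB_STATE). The helper name is ours.
+static __inline const ATOM_PPLIB_STATE *
+atom_pplib_state_at(const void *pStateArray, UCHAR ucStateEntrySize,
+    unsigned int i)
+{
+	return ((const ATOM_PPLIB_STATE *)
+	    ((const UCHAR *)pStateArray + (ULONG)i * ucStateEntrySize));
+}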
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT  usTMax;                          // The max temperature
+} ATOM_PPLIB_FANTABLE2;
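+
+// Illustrative sketch, not PPLib code: the fan table describes a two-slope
+// curve (PWMMin at TMin, PWMMed at TMed, PWMHigh at THigh) with linear
+// interpolation in between; temperatures are in 0.01 C, PWM in 0.01 %.
+// Assumes TMin < TMed < THigh and non-decreasing PWM values. The helper name
+// is ours.
+static __inline USHORT atom_fan_pwm_for_temp(const ATOM_PPLIB_FANTABLE *f,
+    USHORT usTemp)
+{
+	if (usTemp <= f->usTMin)
+		return (f->usPWMMin);
+	if (usTemp <= f->usTMed)
+		return (f->usPWMMin + (USHORT)((ULONG)(f->usPWMMed - f->usPWMMin) *
+		    (usTemp - f->usTMin) / (f->usTMed - f->usTMin)));
+	if (usTemp <= f->usTHigh)
+		return (f->usPWMMed + (USHORT)((ULONG)(f->usPWMHigh - f->usPWMMed) *
+		    (usTemp - f->usTMed) / (f->usTHigh - f->usTMed)));
+	return (f->usPWMHigh);
+}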
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Whether the driver controls VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
+
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+      ATOM_COMMON_TABLE_HEADER sHeader;
+
+      UCHAR ucDataRevision;
+
+      UCHAR ucNumStates;
+      UCHAR ucStateEntrySize;
+      UCHAR ucClockInfoSize;
+      UCHAR ucNonClockSize;
+
+      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+      USHORT usStateArrayOffset;
+
+      // offset from start of this table to array of ASIC-specific structures,
+      // currently ATOM_PPLIB_CLOCK_INFO.
+      USHORT usClockInfoArrayOffset;
+
+      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+      USHORT usNonClockInfoArrayOffset;
+
+      USHORT usBackbiasTime;    // in microseconds
+      USHORT usVoltageTime;     // in microseconds
+      USHORT usTableSize;       //the size of this structure, or the extended structure
+
+      ULONG ulPlatformCaps;            // See the ATOM_PP_PLATFORM_CAP_* flags above.
+
+      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
+
+      USHORT usBootClockInfoOffset;
+      USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
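+
+/*
+ * Illustrative sketch (not part of the original header): the state array is
+ * located by a byte offset from the start of the table, and each entry is
+ * ucStateEntrySize bytes because ATOM_PPLIB_STATE carries a variable number
+ * of ucClockStateIndices, so entries must be addressed manually.  The
+ * example_* helper is hypothetical and assumes drmP.h's le16_to_cpu().
+ */
+static inline ATOM_PPLIB_STATE *
+example_pplib_state(ATOM_PPLIB_POWERPLAYTABLE *t, UCHAR idx)
+{
+	UCHAR *base = (UCHAR *)t + le16_to_cpu(t->usStateArrayOffset);
+
+	return (ATOM_PPLIB_STATE *)(base + (ULONG)idx * t->ucStateEntrySize);
+}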
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+    USHORT                     usReserved;  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
+    USHORT                     usTDPODLimit;
+    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
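+
+/*
+ * Illustrative sketch (not part of the original header): the low three bits
+ * of usClassification carry the UI class; the remaining bits are independent
+ * flags.  The example_* helper below is hypothetical.
+ */
+static inline UCHAR example_pplib_ui_class(USHORT usClassification)
+{
+	return (usClassification & ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+	    ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+}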
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
+
+// stored as (lanes - 1); 1, 2, 4, 8, 12 and 16 lanes are permitted by the PCIe spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
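+
+/*
+ * Illustrative sketch (not part of the original header): decoding the PCIe
+ * lane count from ulCapsAndSettings.  The field stores (lanes - 1), so the
+ * hypothetical helper below adds the 1 back.
+ */
+static inline ULONG example_pplib_pcie_lanes(ULONG ulCapsAndSettings)
+{
+	return ((ulCapsAndSettings & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+	    ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+}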
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+
+// memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+// M3 arbiter -- 2 bits; currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
+
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+      USHORT usClassification;
+      UCHAR  ucMinTemperature;
+      UCHAR  ucMaxTemperature;
+      ULONG  ulCapsAndSettings;
+      UCHAR  ucRequiredPower;
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usUnused1;
+      USHORT usUnused2;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
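+
+/*
+ * Illustrative sketch (not part of the original header): engine and memory
+ * clocks are split into a 16-bit low part and an 8-bit high part, and the
+ * combined 24-bit value is in 10 kHz units.  E.g. high 0x01, low 0x86A0
+ * gives 0x0186A0 = 100000, i.e. 1 GHz.  The example_* helper is
+ * hypothetical and assumes drmP.h's le16_to_cpu().
+ */
+static inline ULONG example_clock_khz(USHORT usLow, UCHAR ucHigh)
+{
+	return (((ULONG)ucHigh << 16) | le16_to_cpu(usLow)) * 10;
+}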
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      UCHAR  ucPCIEGen;
+      UCHAR  ucUnused1;
+
+      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
+      UCHAR  ucLowEngineClockHigh;
+      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
+      UCHAR  ucHighEngineClockHigh;
+      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+      UCHAR  ucMemoryClockHigh;           // Currently unused.
+      UCHAR  ucPadding;                   // For proper alignment and size.
+      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. The minimum downstream width could be bigger due to display BW requirements.
+      USHORT usHTLinkFreq;                // See the ATOM_PPLIB_RS780_HTLINKFREQ_xxx definitions, or a value in MHz (>= 200).
+      ULONG  ulFlags; 
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  // clockfrequency & 0xFFFF. The unit is 10 kHz.
+      UCHAR  ucEngineClockHigh; // clockfrequency >> 16.
+      UCHAR  vddcIndex;         // 2-bit vddc index.
+      USHORT tdpLimit;
+      // please initialize to 0
+      USHORT rsv1;
+      // please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      // Number of valid dpm levels in this state; the driver uses it to calculate the whole
+      // size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+
+      // an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * The driver will read the first ucNumDPMLevels entries in this array.
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
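+
+/*
+ * Illustrative sketch (not part of the original header): ATOM_PPLIB_STATE_V2
+ * entries are variable-sized, so walking the states[] array advances by the
+ * per-entry size instead of sizeof().  The hypothetical helper below returns
+ * the next entry; the caller bounds the walk with StateArray.ucNumEntries.
+ */
+static inline ATOM_PPLIB_STATE_V2 *
+example_next_state_v2(ATOM_PPLIB_STATE_V2 *s)
+{
+	return (ATOM_PPLIB_STATE_V2 *)((UCHAR *)s +
+	    sizeof(ATOM_PPLIB_STATE_V2) +
+	    (s->ucNumDPMLevels - 1) * sizeof(UCHAR));
+}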
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    // size of one ASIC-specific clock info entry, e.g. sizeof(ATOM_PPLIB_R600_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+    // how many non-clock levels we have; normally the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
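+
+/*
+ * Illustrative sketch (not part of the original header): each record maps a
+ * 24-bit clock (10 kHz units) to a voltage, and a driver would typically
+ * scan for the first entry whose clock covers the request.  The example_*
+ * helper is hypothetical and assumes drmP.h's le16_to_cpu().
+ */
+static inline USHORT
+example_voltage_for_clock(const ATOM_PPLIB_Clock_Voltage_Dependency_Table *t,
+    ULONG clock)
+{
+	UCHAR i;
+
+	if (t->ucNumEntries == 0)
+		return 0;
+	for (i = 0; i < t->ucNumEntries; i++) {
+		ULONG c = ((ULONG)t->entries[i].ucClockHigh << 16) |
+		    le16_to_cpu(t->entries[i].usClockLow);
+		if (c >= clock)
+			return le16_to_cpu(t->entries[i].usVoltage);
+	}
+	/* request above the table: fall back to the highest entry */
+	return le16_to_cpu(t->entries[t->ucNumEntries - 1].usVoltage);
+}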
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Record
+{
+    USHORT usVddc;  // We use this field for the "fake" standardized VDDC for power calculations                                                  
+    ULONG  ulLeakageValue;
+}ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR  ucVCEClockInfoIndex;
+    UCHAR  ucClockInfoIndex; // the highest 2 bits indicate memory p-states, the lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+      UCHAR revid;
+//    VCEClockInfoArray array;
+//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
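+
+/*
+ * Note (clarification, not in the original): the commented-out members above
+ * are not dead code.  The VCEClockInfoArray, limits table and state table do
+ * follow revid back to back in the BIOS image, but each is variable-sized,
+ * so they cannot be expressed as fixed struct members and are instead
+ * located with pointer arithmetic at parse time.
+ */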
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_State_Record
+{
+    UCHAR  ucUVDClockInfoIndex;
+    UCHAR  ucClockInfoIndex; // the highest 2 bits indicate memory p-states, the lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_UVD_State_Record;
+
+typedef struct _ATOM_PPLIB_UVD_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_State_Record entries[1];
+}ATOM_PPLIB_UVD_State_Table;
+
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+      UCHAR revid;
+//    UVDClockInfoArray array;
+//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_UVD_State_Table states;
+}ATOM_PPLIB_UVD_Table;
+
+/**************************************************************************/
+
+
+// The following definitions exist for compatibility across different SW components.
+#define ATOM_MASTER_DATA_TABLE_REVISION   0x01
+#define Object_Info                       Object_Header
+#define AdjustARB_SEQ                     MC_InitParameter
+#define VRAM_GPIO_DetectionInfo           VoltageObjectInfo
+#define ASIC_VDDCI_Info                   ASIC_ProfilingInfo
+#define ASIC_MVDDQ_Info                   MemoryTrainingInfo
+#define SS_Info                           PPLL_SS_Info
+#define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
+#define DispDevicePriorityInfo            SaveRestoreInfo
+#define DispOutInfo                       TV_VideoMode
+
+
+#define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
+
+// New device naming; remove these when both DAL and VBIOS are ready.
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS    DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT          ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT          ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
+ 
+#define ATOM_DEVICE_DFP2I_INDEX            0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I                      ATOM_S0_DFP1
+#define ATOM_S0_DFP1X                      ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I                      0x00200000L
+#define ATOM_S0_DFP2Ib2                    0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE           ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE           ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE           0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3         0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1             0x02
+
+#define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE 
+#define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE               0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE          ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE          ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE          0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3        0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1            0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I              0x0200
+#define ATOM_S6_ACC_REQ_DFP1I              ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X              ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
+#define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
+
+#define TMDS1XEncoderControl               DVOEncoderControl           
+#define DFP1XOutputControl                 DVOOutputControl
+
+#define ExternalDFPOutputControl           DFP1XOutputControl
+#define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
+
+#define DFP1IOutputControl                 TMDSAOutputControl
+#define DFP2IOutputControl                 LVTMAOutputControl      
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard  ucDacStandard
+#define ucDac2Standard  ucDacStandard  
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl   TMDSAOutputControl
+#define DFP2OutputControl   LVTMAOutputControl
+#define CRT1OutputControl   DAC1OutputControl
+#define CRT2OutputControl   DAC2OutputControl
+
+// These two lines will be removed in a few days; will follow up with Michael V.
+#define EnableLVDS_SS   EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3  ENABLE_SPREAD_SPECTRUM_ON_PPLL  
+
+//#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_DEVICE_TV2_INDEX           0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT         (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S3_TV2_ACTIVE              ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE         ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE         0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE          0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE         0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE         0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE         0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE          0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE         0x00800000L
+#define ATOM_S2_CV_DPMS_STATE           0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE         0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE         0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE         0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2       0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2       0x02
+#define ATOM_S2_TV1_DPMS_STATEb2        0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2       0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2       0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2       0x20
+#define ATOM_S2_TV2_DPMS_STATEb2        0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2       0x80
+#define ATOM_S2_CV_DPMS_STATEb3         0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3       0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3       0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3       0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3  0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3  0x80
+
+/*********************************************************************************/
+
+#pragma pack() // BIOS data must use byte alignment.
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;      //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8];    //UINT64  OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32  Signature;       //0x0
+  UINT32  Length;          //0x4
+  UINT8   Revision;        //0x8
+  UINT8   Checksum;        //0x9
+  UINT8   OemId[6];        //0xA
+  UINT64  OemTableId;      //0x10
+  UINT32  OemRevision;     //0x18
+  UINT32  CreatorId;       //0x1C
+  UINT32  CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+  ULONG Reserved[4];      //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+  ULONG  PCIBus;          //0x4C
+  ULONG  PCIDevice;       //0x50
+  ULONG  PCIFunction;     //0x54
+  USHORT VendorID;        //0x58
+  USHORT DeviceID;        //0x5A
+  USHORT SSVID;           //0x5C
+  USHORT SSID;            //0x5E
+  ULONG  Revision;        //0x60
+  ULONG  ImageLength;     //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+  VFCT_IMAGE_HEADER	VbiosHeader;
+  UCHAR	VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+  VFCT_IMAGE_HEADER	Lib1Header;
+  UCHAR	Lib1Content[1];
+}GOP_LIB1_CONTENT;
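+
+/*
+ * Illustrative sketch (not part of the original header): a VFCT consumer
+ * locates the first VBIOS image via VBIOSImageOffset; the hypothetical
+ * helper below assumes drmP.h's le32_to_cpu().  Subsequent images, when
+ * present, follow at ImageLength-sized strides.
+ */
+static inline GOP_VBIOS_CONTENT *
+example_vfct_first_vbios(UEFI_ACPI_VFCT *vfct)
+{
+	return (GOP_VBIOS_CONTENT *)((UCHAR *)vfct +
+	    le32_to_cpu(vfct->VBIOSImageOffset));
+}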
+
+#pragma pack()
+
+
+#endif /* _ATOMBIOS_H */


Property changes on: trunk/sys/dev/drm2/radeon/atombios.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atombios_crtc.c
===================================================================
--- trunk/sys/dev/drm2/radeon/atombios_crtc.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atombios_crtc.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1940 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atombios_crtc.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include <dev/drm2/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
+static void atombios_overscan_setup(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+	int a1, a2;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_CENTER:
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		break;
+	case RMX_ASPECT:
+		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+		if (a1 > a2) {
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+		} else if (a2 > a1) {
+			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+		}
+		break;
+	case RMX_FULL:
+	default:
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
+		break;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
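+
+/*
+ * Worked example for the RMX_ASPECT case above (illustrative): with a
+ * 1920x1200 panel (adjusted_mode) showing a 1920x1080 mode, a1 = 1080 * 1920
+ * and a2 = 1200 * 1920, so a2 > a1 and the top/bottom overscan each become
+ * (1200 - (a1 / 1920)) / 2 = (1200 - 1080) / 2 = 60 lines.
+ */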
+
+static void atombios_scaler_setup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	ENABLE_SCALER_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	/* fixme - fill in enc_priv for atom dac */
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+	bool is_tv = false, is_cv = false;
+
+	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+		return;
+
+	if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		tv_std = tv_dac->tv_std;
+		is_tv = true;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucScaler = radeon_crtc->crtc_id;
+
+	if (is_tv) {
+		switch (tv_std) {
+		case TV_STD_NTSC:
+		default:
+			args.ucTVStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.ucTVStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.ucTVStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.ucTVStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.ucTVStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.ucTVStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.ucTVStandard = ATOM_TV_PALCN;
+			break;
+		}
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else if (is_cv) {
+		args.ucTVStandard = ATOM_TV_CV;
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else {
+		switch (radeon_crtc->rmx_type) {
+		case RMX_FULL:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		case RMX_CENTER:
+			args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		case RMX_ASPECT:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.ucEnable = ATOM_SCALER_DISABLE;
+			else
+				args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		}
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	if ((is_tv || is_cv)
+	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
+		atom_rv515_force_tv_scaler(rdev, radeon_crtc);
+	}
+}
+
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index =
+	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+	BLANK_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucBlanking = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucDispPipeId = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		atombios_enable_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+		atombios_blank_crtc(crtc, ATOM_DISABLE);
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		if (radeon_crtc->enabled)
+			atombios_blank_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+		atombios_enable_crtc(crtc, ATOM_DISABLE);
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		break;
+	}
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
+	args.usH_Blanking_Time =
+		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
+	args.usV_Blanking_Time =
+		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
+	args.usH_SyncOffset =
+		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_SyncOffset =
+		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+	args.ucH_Border = radeon_crtc->h_border;
+	args.ucV_Border = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+				     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+	args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+	args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+	args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+	args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+	args.ucOverscanRight = radeon_crtc->h_border;
+	args.ucOverscanLeft = radeon_crtc->h_border;
+	args.ucOverscanBottom = radeon_crtc->v_border;
+	args.ucOverscanTop = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+{
+	u32 ss_cntl;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	}
+}
+
+
+union atom_enable_ss {
+	ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+};
+
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
+				     int enable,
+				     int pll_id,
+				     int crtc_id,
+				     struct radeon_atom_ss *ss)
+{
+	unsigned i;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+	union atom_enable_ss args;
+
+	if (!enable) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->mode_info.crtcs[i] &&
+			    rdev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+				/* another crtc is using this pll; don't turn
+				 * off spread spectrum, as that might turn off
+				 * the display on the active crtc
+				 */
+				return;
+			}
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE5(rdev)) {
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+		args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v3.ucEnable = enable;
+		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
+			args.v3.ucEnable = ATOM_DISABLE;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v2.ucEnable = enable;
+		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
+			args.v2.ucEnable = ATOM_DISABLE;
+	} else if (ASIC_IS_DCE3(rdev)) {
+		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.v1.ucSpreadSpectrumStep = ss->step;
+		args.v1.ucSpreadSpectrumDelay = ss->delay;
+		args.v1.ucSpreadSpectrumRange = ss->range;
+		args.v1.ucPpll = pll_id;
+		args.v1.ucEnable = enable;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+		args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+		args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+		args.lvds_ss_2.ucEnable = enable;
+	} else {
+		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+		args.lvds_ss.ucEnable = enable;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union adjust_pixel_clock {
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder = radeon_crtc->encoder;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	u32 adjusted_clock = mode->clock;
+	int encoder_mode = atombios_get_encoder_mode(encoder);
+	u32 dp_clock = mode->clock;
+	int bpc = radeon_get_monitor_bpc(connector);
+	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
+
+	/* reset the pll flags */
+	radeon_crtc->pll_flags = 0;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+				RADEON_PLL_PREFER_CLOSEST_LOWER);
+
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+		if (rdev->family < CHIP_RV770)
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+		/* use frac fb div on APUs */
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		/* use frac fb div on RS780/RS880 */
+		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+	} else {
+		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
+
+		if (mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+	}
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		if (connector) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			struct radeon_connector_atom_dig *dig_connector =
+				radeon_connector->con_priv;
+
+			dp_clock = dig_connector->dp_clock;
+		}
+	}
+
+	/* use recommended ref_div for ss */
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_crtc->ss_enabled) {
+			if (radeon_crtc->ss.refdiv) {
+				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+				if (ASIC_IS_AVIVO(rdev))
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+			}
+		}
+	}
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+			adjusted_clock = mode->clock * 2;
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+	} else {
+		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+			radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+	}
+
+	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+	 * accordingly based on the encoder/transmitter to work around
+	 * special hw requirements.
+	 */
+	if (ASIC_IS_DCE3(rdev)) {
+		union adjust_pixel_clock args;
+		u8 frev, crev;
+		int index;
+
+		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+					   &crev))
+			return adjusted_clock;
+
+		memset(&args, 0, sizeof(args));
+
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 1:
+			case 2:
+				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v1.ucEncodeMode = encoder_mode;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v1.ucConfig |=
+						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+				break;
+			case 3:
+				args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v3.sInput.ucEncodeMode = encoder_mode;
+				args.v3.sInput.ucDispPllConfig = 0;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;
+				if (ENCODER_MODE_IS_DP(encoder_mode)) {
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+					/* 16200 or 27000 */
+					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+				} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+					if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+						/* deep color support */
+						args.v3.sInput.usPixelClock =
+							cpu_to_le16((mode->clock * bpc / 8) / 10);
+					if (dig->coherent_mode)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_COHERENT_MODE;
+					if (is_duallink)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_DUAL_LINK;
+				}
+				if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+				    ENCODER_OBJECT_ID_NONE)
+					args.v3.sInput.ucExtTransmitterID =
+						radeon_encoder_get_dp_bridge_encoder_id(encoder);
+				else
+					args.v3.sInput.ucExtTransmitterID = 0;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+				if (args.v3.sOutput.ucRefDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+					radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
+				}
+				if (args.v3.sOutput.ucPostDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+					radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
+				}
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+				return adjusted_clock;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return adjusted_clock;
+		}
+	}
+	return adjusted_clock;
+}
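+
+/*
+ * Worked example for the HDMI deep color path above (illustrative): at
+ * 10 bpc the pixel clock handed to AdjustDisplayPll scales by bpc / 8, so a
+ * 148500 kHz 1080p mode becomes 148500 * 10 / 8 = 185625 kHz, i.e. the link
+ * runs 25% faster than the nominal pixel clock.
+ */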
+
+union set_pixel_clock {
+	SET_PIXEL_CLOCK_PS_ALLOCATION base;
+	PIXEL_CLOCK_PARAMETERS v1;
+	PIXEL_CLOCK_PARAMETERS_V2 v2;
+	PIXEL_CLOCK_PARAMETERS_V3 v3;
+	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
+};
+
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
+				    u32 dispclk)
+{
+	u8 frev, crev;
+	int index;
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 5:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v5.ucCRTC = ATOM_CRTC_INVALID;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
+			args.v5.ucPpll = ATOM_DCPLL;
+			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+			if (ASIC_IS_DCE61(rdev))
+				args.v6.ucPpll = ATOM_EXT_PLL1;
+			else if (ASIC_IS_DCE6(rdev))
+				args.v6.ucPpll = ATOM_PPLL0;
+			else
+				args.v6.ucPpll = ATOM_DCPLL;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+				      u32 crtc_id,
+				      int pll_id,
+				      u32 encoder_mode,
+				      u32 encoder_id,
+				      u32 clock,
+				      u32 ref_div,
+				      u32 fb_div,
+				      u32 frac_fb_div,
+				      u32 post_div,
+				      int bpc,
+				      bool ss_enabled,
+				      struct radeon_atom_ss *ss)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 frev, crev;
+	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			if (clock == ATOM_DISABLE)
+				return;
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
+			args.v1.usRefDiv = cpu_to_le16(ref_div);
+			args.v1.usFbDiv = cpu_to_le16(fb_div);
+			args.v1.ucFracFbDiv = frac_fb_div;
+			args.v1.ucPostDiv = post_div;
+			args.v1.ucPpll = pll_id;
+			args.v1.ucCRTC = crtc_id;
+			args.v1.ucRefDivSrc = 1;
+			break;
+		case 2:
+			args.v2.usPixelClock = cpu_to_le16(clock / 10);
+			args.v2.usRefDiv = cpu_to_le16(ref_div);
+			args.v2.usFbDiv = cpu_to_le16(fb_div);
+			args.v2.ucFracFbDiv = frac_fb_div;
+			args.v2.ucPostDiv = post_div;
+			args.v2.ucPpll = pll_id;
+			args.v2.ucCRTC = crtc_id;
+			args.v2.ucRefDivSrc = 1;
+			break;
+		case 3:
+			args.v3.usPixelClock = cpu_to_le16(clock / 10);
+			args.v3.usRefDiv = cpu_to_le16(ref_div);
+			args.v3.usFbDiv = cpu_to_le16(fb_div);
+			args.v3.ucFracFbDiv = frac_fb_div;
+			args.v3.ucPostDiv = post_div;
+			args.v3.ucPpll = pll_id;
+			if (crtc_id == ATOM_CRTC2)
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+			else
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+			args.v3.ucTransmitterId = encoder_id;
+			args.v3.ucEncoderMode = encoder_mode;
+			break;
+		case 5:
+			args.v5.ucCRTC = crtc_id;
+			args.v5.usPixelClock = cpu_to_le16(clock / 10);
+			args.v5.ucRefDiv = ref_div;
+			args.v5.usFbDiv = cpu_to_le16(fb_div);
+			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v5.ucPostDiv = post_div;
+			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+			switch (bpc) {
+			case 8:
+			default:
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+				break;
+			case 10:
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+				break;
+			}
+			args.v5.ucTransmitterID = encoder_id;
+			args.v5.ucEncoderMode = encoder_mode;
+			args.v5.ucPpll = pll_id;
+			break;
+		case 6:
+			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+			switch (bpc) {
+			case 8:
+			default:
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+				break;
+			case 10:
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+				break;
+			case 12:
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+				break;
+			case 16:
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+				break;
+			}
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	radeon_crtc->bpc = 8;
+	radeon_crtc->ss_enabled = false;
+
+	if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		struct drm_connector *connector =
+			radeon_get_connector_for_encoder(radeon_crtc->encoder);
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+		int dp_clock;
+		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
+
+		switch (encoder_mode) {
+		case ATOM_ENCODER_MODE_DP_MST:
+		case ATOM_ENCODER_MODE_DP:
+			/* DP/eDP */
+			dp_clock = dig_connector->dp_clock / 10;
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_DP,
+									 dp_clock);
+			else {
+				if (dp_clock == 16200) {
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID2);
+					if (!radeon_crtc->ss_enabled)
+						radeon_crtc->ss_enabled =
+							radeon_atombios_get_ppll_ss_info(rdev,
+											 &radeon_crtc->ss,
+											 ATOM_DP_SS_ID1);
+				} else
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID1);
+			}
+			break;
+		case ATOM_ENCODER_MODE_LVDS:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id,
+									 mode->clock / 10);
+			else
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_ppll_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id);
+			break;
+		case ATOM_ENCODER_MODE_DVI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_TMDS,
+									 mode->clock / 10);
+			break;
+		case ATOM_ENCODER_MODE_HDMI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_HDMI,
+									 mode->clock / 10);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* adjust pixel clock as needed */
+	radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+	return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_clock = mode->clock;
+	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+	struct radeon_pll *pll;
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+		pll = &rdev->clock.p1pll;
+		break;
+	case ATOM_PPLL2:
+		pll = &rdev->clock.p2pll;
+		break;
+	case ATOM_DCPLL:
+	case ATOM_PPLL_INVALID:
+	default:
+		pll = &rdev->clock.dcpll;
+		break;
+	}
+
+	/* update pll params */
+	pll->flags = radeon_crtc->pll_flags;
+	pll->reference_div = radeon_crtc->pll_reference_div;
+	pll->post_div = radeon_crtc->pll_post_div;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		/* TV seems to prefer the legacy algo on some boards */
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else if (ASIC_IS_AVIVO(rdev))
+		radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					 &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+				 radeon_crtc->crtc_id, &radeon_crtc->ss);
+
+	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
+				  ref_div, fb_div, frac_fb_div, post_div,
+				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
+
+	if (radeon_crtc->ss_enabled) {
+		/* calculate ss amount and step size */
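+		/* (illustrative: the math below assumes ss.percentage is in
+		 * 0.01% units and that fb_div * 10 + frac_fb_div is the
+		 * feedback divider scaled by ten)
+		 */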
+		if (ASIC_IS_DCE4(rdev)) {
+			u32 step_size;
+			u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+			radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+				step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			else
+				step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			radeon_crtc->ss.step = step_size;
+		}
+
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+					 radeon_crtc->crtc_id, &radeon_crtc->ss);
+	}
+}
+
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	unsigned bankw, bankh, mtaspect, tile_split;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 tmp, viewport_w, viewport_h;
+	int r;
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case 15:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+		break;
+	case 16:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case 24:
+	case 32:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+			  target_fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (rdev->family >= CHIP_TAHITI)
+			tmp = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
+			tmp = rdev->config.cayman.tile_config;
+		else
+			tmp = rdev->config.evergreen.tile_config;
+
+		switch ((tmp & 0xf0) >> 4) {
+		case 0: /* 4 banks */
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+			break;
+		case 1: /* 8 banks */
+		default:
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+			break;
+		case 2: /* 16 banks */
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+			break;
+		}
+
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+
+		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+	} else if (tiling_flags & RADEON_TILING_MICRO)
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+	if ((rdev->family == CHIP_TAHITI) ||
+	    (rdev->family == CHIP_PITCAIRN))
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+	else if (rdev->family == CHIP_VERDE)
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+		break;
+	case 1:
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+		break;
+	case 2:
+		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+		break;
+	case 3:
+		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+		break;
+	case 4:
+		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+		break;
+	case 5:
+		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+		break;
+	default:
+		break;
+	}
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
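+	/* pitches[] is in bytes; the GRPH pitch register takes pixels */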
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       target_fb->height);
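+	/* align the viewport start down to a 4-pixel/2-line boundary and
+	 * round the height up to an even number of lines
+	 */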
+	x &= ~3;
+	y &= ~1;
+	WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* pageflip setup */
+	/* make sure the flip happens during vertical blank rather than
+	 * horizontal blank
+	 */
+	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct drm_framebuffer *target_fb;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
+	u32 tmp, viewport_w, viewport_h;
+	int r;
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
+		    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
+		break;
+	case 15:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+		break;
+	case 16:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+		break;
+	case 24:
+	case 32:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+			  target_fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
+		if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= AVIVO_D1GRPH_TILED;
+	}
+
+	if (radeon_crtc->crtc_id == 0)
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+	else
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+	if (rdev->family >= CHIP_RV770) {
+		if (radeon_crtc->crtc_id) {
+			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		} else {
+			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		}
+	}
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+	       radeon_crtc->crtc_offset, (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       target_fb->height);
+	x &= ~3;
+	y &= ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* pageflip setup */
+	/* make sure the flip happens during vertical blank rather than
+	 * horizontal blank
+	 */
+	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			   struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else
+		return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, enum mode_set_atomic state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+	else
+		return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	u32 disp_merge_cntl;
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		break;
+	case 1:
+		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+		break;
+	}
+}
+
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << test_radeon_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* for DP use the same PLL for all */
+			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ * @encoder: drm encoder
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 adjusted_clock, test_adjusted_clock;
+
+	adjusted_clock = radeon_crtc->adjusted_clock;
+
+	if (adjusted_clock == 0)
+		return ATOM_PPLL_INVALID;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* check if we are already driving this connector with another crtc */
+			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+				/* if we are, return that pll */
+				if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+					return test_radeon_crtc->pll_id;
+			}
+			/* for non-DP check the clock */
+			test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+			    (adjusted_clock == test_adjusted_clock) &&
+			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * ASIC-specific PLL information:
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_in_use;
+	int pll;
+
+	if (ASIC_IS_DCE61(rdev)) {
+		struct radeon_encoder_atom_dig *dig =
+			radeon_encoder->enc_priv;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+		    (dig->linkb == false))
+			/* UNIPHY A uses PPLL2 */
+			return ATOM_PPLL2;
+		else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			/* UNIPHY B/C/D/E/F */
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* UNIPHY B/C/D/E/F */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL0)))
+			return ATOM_PPLL0;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+		 * depending on the asic:
+		 * DCE4: PPLL or ext clock
+		 * DCE5: PPLL, DCPLL, or ext clock
+		 * DCE6: PPLL, PPLL0, or ext clock
+		 *
+		 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+		 * PPLL/DCPLL programming and only program the DP DTO for the
+		 * crtc virtual pixel clock.
+		 */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else if (ASIC_IS_DCE6(rdev))
+				/* use PPLL0 for all DP */
+				return ATOM_PPLL0;
+			else if (ASIC_IS_DCE5(rdev))
+				/* use DCPLL for all DP */
+				return ATOM_DCPLL;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* all other cases */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else {
+		/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+		/* some atombios code (observed on some DCE2/DCE3 boards) has a
+		 * bug: the matching between pll and crtc is done through
+		 * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses
+		 * the pll id (1 or 2) to select which register to write, i.e.
+		 * pll1 writes PCLK_CRTC1_CNTL (0x480) and pll2 writes
+		 * PCLK_CRTC2_CNTL (0x484), and then uses the crtc id to choose
+		 * which value to write.  That is the reverse of the register
+		 * logic, so the only cases that work are when the pll id equals
+		 * the crtc id, or when both plls and crtcs are enabled and use
+		 * the same clock.
+		 *
+		 * So just return the crtc id as if crtc and pll were hard
+		 * linked together, even if they aren't.
+		 */
+		return radeon_crtc->crtc_id;
+	}
+}
+
+void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
+{
+	/* always set DCPLL */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+	else if (ASIC_IS_DCE4(rdev)) {
+		struct radeon_atom_ss ss;
+		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+								   ASIC_INTERNAL_SS_ON_DCPLL,
+								   rdev->clock.default_dispclk);
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
+		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
+	}
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+			   struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode,
+			   int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	bool is_tvcv = false;
+
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+		is_tvcv = true;
+
+	atombios_crtc_set_pll(crtc, adjusted_mode);
+
+	if (ASIC_IS_DCE4(rdev))
+		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	else if (ASIC_IS_AVIVO(rdev)) {
+		if (is_tvcv)
+			atombios_crtc_set_timing(crtc, adjusted_mode);
+		else
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	} else {
+		atombios_crtc_set_timing(crtc, adjusted_mode);
+		if (radeon_crtc->crtc_id == 0)
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+		radeon_legacy_atom_fixup(crtc);
+	}
+	atombios_crtc_set_base(crtc, x, y, old_fb);
+	atombios_overscan_setup(crtc, mode, adjusted_mode);
+	atombios_scaler_setup(crtc);
+	return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			radeon_crtc->encoder = encoder;
+			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+		radeon_crtc->encoder = NULL;
+		radeon_crtc->connector = NULL;
+		return false;
+	}
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+		return false;
+
+	return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_crtc->in_mode_set = true;
+
+	/* disable crtc pair power gating before programming */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
+	atombios_lock_crtc(crtc, ATOM_ENABLE);
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	atombios_lock_crtc(crtc, ATOM_DISABLE);
+	radeon_crtc->in_mode_set = false;
+}
+
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_atom_ss ss;
+	int i;
+
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i] &&
+		    rdev->mode_info.crtcs[i]->enabled &&
+		    i != radeon_crtc->crtc_id &&
+		    radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is still using this pll; don't
+			 * turn it off
+			 */
+			goto done;
+		}
+	}
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+	case ATOM_PPLL2:
+		/* disable the ppll */
+		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	case ATOM_PPLL0:
+		/* disable the ppll */
+		if (ASIC_IS_DCE61(rdev))
+			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	default:
+		break;
+	}
+done:
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+	.dpms = atombios_crtc_dpms,
+	.mode_fixup = atombios_crtc_mode_fixup,
+	.mode_set = atombios_crtc_mode_set,
+	.mode_set_base = atombios_crtc_set_base,
+	.mode_set_base_atomic = atombios_crtc_set_base_atomic,
+	.prepare = atombios_crtc_prepare,
+	.commit = atombios_crtc_commit,
+	.load_lut = radeon_crtc_load_lut,
+	.disable = atombios_crtc_disable,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+		default:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			break;
+		case 1:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			break;
+		case 2:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+			break;
+		case 3:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+			break;
+		case 4:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+			break;
+		case 5:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+			break;
+		}
+	} else {
+		if (radeon_crtc->crtc_id == 1)
+			radeon_crtc->crtc_offset =
+				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+		else
+			radeon_crtc->crtc_offset = 0;
+	}
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}


Property changes on: trunk/sys/dev/drm2/radeon/atombios_crtc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atombios_dp.c
===================================================================
--- trunk/sys/dev/drm2/radeon/atombios_dp.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atombios_dp.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,894 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atombios_dp.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include <dev/drm2/drm_dp_helper.h>
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
+
+static char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+/***** radeon AUX functions *****/
+union aux_channel_transaction {
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
+
+static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+				 u8 *send, int send_bytes,
+				 u8 *recv, int recv_size,
+				 u8 delay, u8 *ack)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union aux_channel_transaction args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+	int recv_bytes;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
+
+	memcpy(base, send, send_bytes);
+
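+	/* request/reply offsets are relative to the ATOM scratch area; the
+	 * "+ 4" appears to mirror the one-dword offset applied to base above
+	 */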
+	args.v1.lpAuxRequest = 0 + 4;
+	args.v1.lpDataOut = 16 + 4;
+	args.v1.ucDataOutLen = 0;
+	args.v1.ucChannelID = chan->rec.i2c_id;
+	args.v1.ucDelay = delay / 10;
+	if (ASIC_IS_DCE4(rdev))
+		args.v2.ucHPD_ID = chan->rec.hpd;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*ack = args.v1.ucReplyStatus;
+
+	/* timeout */
+	if (args.v1.ucReplyStatus == 1) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	/* flags not zero */
+	if (args.v1.ucReplyStatus == 2) {
+		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+		return -EBUSY;
+	}
+
+	/* error */
+	if (args.v1.ucReplyStatus == 3) {
+		DRM_DEBUG_KMS("dp_aux_ch error\n");
+		return -EIO;
+	}
+
+	recv_bytes = args.v1.ucDataOutLen;
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	if (recv && recv_size)
+		memcpy(recv, base + 16, recv_bytes);
+
+	return recv_bytes;
+}
+
+static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
+				      u16 address, u8 *send, u8 send_bytes, u8 delay)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	int ret;
+	u8 msg[20];
+	int msg_bytes = send_bytes + 4;
+	u8 ack;
+	unsigned retry;
+
+	if (send_bytes > 16)
+		return -1;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_WRITE << 4;
+	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
+	memcpy(&msg[4], send, send_bytes);
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+					    msg, msg_bytes, NULL, 0, delay, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			return send_bytes;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(400);
+		else
+			return -EIO;
+	}
+
+	return -EIO;
+}
+
+static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
+				     u16 address, u8 *recv, int recv_bytes, u8 delay)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[4];
+	int msg_bytes = 4;
+	u8 ack;
+	int ret;
+	unsigned retry;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_READ << 4;
+	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			return ret;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(400);
+		else if (ret == 0)
+			return -EPROTO;
+		else
+			return -EIO;
+	}
+
+	return -EIO;
+}
+
+static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
+				 u16 reg, u8 val)
+{
+	radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
+}
+
+static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
+			       u16 reg)
+{
+	u8 val = 0;
+
+	radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
+
+	return val;
+}
+
+int radeon_dp_i2c_aux_ch(device_t dev, int mode, u8 write_byte, u8 *read_byte)
+{
+	struct iic_dp_aux_data *algo_data = device_get_softc(dev);
+	struct radeon_i2c_chan *auxch = algo_data->priv;
+	u16 address = algo_data->address;
+	u8 msg[5];
+	u8 reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes = 1;
+	int ret;
+	u8 ack;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[2] = AUX_I2C_READ << 4;
+	else
+		msg[2] = AUX_I2C_WRITE << 4;
+
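+	/* MOT (Middle-Of-Transaction) keeps the I2C-over-AUX transaction
+	 * open across fragments until a STOP is requested
+	 */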
+	if (!(mode & MODE_I2C_STOP))
+		msg[2] |= AUX_I2C_MOT << 4;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg_bytes = 5;
+		msg[3] = msg_bytes << 4;
+		msg[4] = write_byte;
+		break;
+	case MODE_I2C_READ:
+		msg_bytes = 4;
+		msg[3] = msg_bytes << 4;
+		break;
+	default:
+		msg_bytes = 4;
+		msg[3] = 3 << 4;
+		break;
+	}
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(auxch,
+					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (ack & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_ch native defer\n");
+			udelay(400);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
+			return -EREMOTEIO;
+		}
+
+		switch (ack & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ)
+				*read_byte = reply[0];
+			return ret;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(400);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
+	return -EREMOTEIO;
+}
+
+/***** general DP utility functions *****/
+
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
+#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+				int lane_count,
+				u8 train_set[4])
+{
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v |= DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= DP_PRE_EMPHASIS_MAX)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
+
+/* convert bits per color (as read from the EDID) to bits per pixel;
+ * a bpc of 0 (unknown) defaults to 24 bpp
+ */
+static int convert_bpc_to_bpp(int bpc)
+{
+	if (bpc == 0)
+		return 24;
+	else
+		return bpc * 3;
+}
+
+/* get the max pix clock supported by the link rate and lane num */
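+/* link_rate is the 8b/10b symbol clock in kHz and each symbol carries 8 data
+ * bits per lane, so e.g. 270000 kHz x 4 lanes x 8 bits / 24 bpp supports up
+ * to a 360000 kHz (360 MHz) pixel clock (illustrative numbers)
+ */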
+static int dp_get_max_dp_pix_clock(int link_rate,
+				   int lane_num,
+				   int bpp)
+{
+	return (link_rate * lane_num * 8) / bpp;
+}
+
+/***** radeon specific DP functions *****/
+
+/* Find the minimum lane count that can carry the pixel clock at the sink's
+ * maximum link rate, doubling from one lane and capping at the maximum lane
+ * count the DP panel supports.
+ */
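+/* e.g. (illustrative) a 148500 kHz mode at 24 bpp on a 270000 kHz link:
+ * one lane carries 90000 kHz worth of pixels and two lanes carry 180000 kHz,
+ * so the loop below settles on two lanes
+ */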
+static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+					u8 dpcd[DP_DPCD_SIZE],
+					int pix_clock)
+{
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+	int max_link_rate = drm_dp_max_link_rate(dpcd);
+	int max_lane_num = drm_dp_max_lane_count(dpcd);
+	int lane_num;
+	int max_dp_pix_clock;
+
+	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+		max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+		if (pix_clock <= max_dp_pix_clock)
+			break;
+	}
+
+	return lane_num;
+}
+
+static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+				       u8 dpcd[DP_DPCD_SIZE],
+				       int pix_clock)
+{
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+	int lane_num, max_pix_clock;
+
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
+		return 270000;
+
+	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+	max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 162000;
+	max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 270000;
+	if (radeon_connector_is_dp12_capable(connector)) {
+		max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+		if (pix_clock <= max_pix_clock)
+			return 540000;
+	}
+
+	return drm_dp_max_link_rate(dpcd);
+}
+
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+				    int action, int dp_clock,
+				    u8 ucconfig, u8 lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 buf[3];
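+	/* sink/branch OUIs are 3-byte IEEE vendor identifiers */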
+
+	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Sink OUI: %02hhx%02hhx%02hhx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Branch OUI: %02hhx%02hhx%02hhx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[DP_DPCD_SIZE];
+	int ret, i;
+
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+					DP_DPCD_SIZE, 0);
+	if (ret > 0) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+		DRM_DEBUG_KMS("DPCD: ");
+		for (i = 0; i < DP_DPCD_SIZE; i++)
+			DRM_DEBUG_KMS("%02x ", msg[i]);
+		DRM_DEBUG_KMS("\n");
+
+		radeon_dp_probe_oui(radeon_connector);
+
+		return true;
+	}
+	dig_connector->dpcd[0] = 0;
+	return false;
+}
+
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+	u8 tmp;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return panel_mode;
+
+	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+		/* DP bridge chips */
+		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+			 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+		else
+			panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		/* eDP */
+		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	}
+
+	return panel_mode;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+			       const struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+		dig_connector->dp_clock =
+			radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+		dig_connector->dp_lane_count =
+			radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+	}
+}
+
+int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int dp_clock;
+
+	if (!radeon_connector->con_priv)
+		return MODE_CLOCK_HIGH;
+	dig_connector = radeon_connector->con_priv;
+
+	dp_clock =
+		radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+
+	if ((dp_clock == 540000) &&
+	    (!radeon_connector_is_dp12_capable(connector)))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+				      u8 link_status[DP_LINK_STATUS_SIZE])
+{
+	int ret;
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+					link_status, DP_LINK_STATUS_SIZE, 100);
+	if (ret <= 0)
+		return false;
+
+	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+	return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+	if (!radeon_dp_get_link_status(radeon_connector, link_status))
+		return false;
+	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
+		return false;
+	return true;
+}
+
+struct radeon_dp_link_train_info {
+	struct radeon_device *rdev;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	int enc_id;
+	int dp_clock;
+	int dp_lane_count;
+	bool tp3_supported;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 train_set[4];
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 tries;
+	bool use_dpencoder;
+};
+
+static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
+{
+	/* set the initial vs/emph on the source */
+	atombios_dig_transmitter_setup(dp_info->encoder,
+				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+				       0, dp_info->train_set[0]); /* sets all lanes at once */
+
+	/* set the vs/emph on the sink */
+	radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
+				   dp_info->train_set, dp_info->dp_lane_count, 0);
+}
+
+static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
+{
+	int rtp = 0;
+
+	/* set training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+			break;
+		}
+		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
+	} else {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = 0;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = 1;
+			break;
+		}
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+					  dp_info->dp_clock, dp_info->enc_id, rtp);
+	}
+
+	/* enable training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
+}
+
+static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 tmp;
+
+	/* power up the sink */
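+	/* (DP_SET_POWER is only defined for DPCD rev 1.1 and newer) */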
+	if (dp_info->dpcd[0] >= 0x11)
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_SET_POWER, DP_SET_POWER_D0);
+
+	/* possibly enable downspread on the sink */
+	if (dp_info->dpcd[3] & 0x1)
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+	else
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_DOWNSPREAD_CTRL, 0);
+
+	if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+	    (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+		radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+	}
+
+	/* set the lane count on the sink */
+	tmp = dp_info->dp_lane_count;
+	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
+
+	/* set the link rate on the sink */
+	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
+
+	/* start training on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	/* disable the training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector,
+			      DP_TRAINING_PATTERN_SET,
+			      DP_TRAINING_PATTERN_DISABLE);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
+{
+	udelay(400);
+
+	/* disable the training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector,
+			      DP_TRAINING_PATTERN_SET,
+			      DP_TRAINING_PATTERN_DISABLE);
+
+	/* disable the training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+{
+	bool clock_recovery;
+	u8 voltage;
+	int i;
+
+	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+	memset(dp_info->train_set, 0, 4);
+	radeon_dp_update_vs_emph(dp_info);
+
+	udelay(400);
+
+	/* clock recovery loop */
+	clock_recovery = false;
+	dp_info->tries = 0;
+	voltage = 0xff;
+	while (1) {
+		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+
+		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dp_info->dp_lane_count; i++) {
+			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dp_info->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
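+		/* DP training allows only a few iterations at the same
+		 * voltage swing; bail after five identical requests
+		 */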
+		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++dp_info->tries;
+			if (dp_info->tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			dp_info->tries = 0;
+
+		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+	}
+	if (!clock_recovery) {
+		DRM_ERROR("clock recovery failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+{
+	bool channel_eq;
+
+	if (dp_info->tp3_supported)
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
+	else
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
+
+	/* channel equalization loop */
+	dp_info->tries = 0;
+	channel_eq = false;
+	while (1) {
+		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+
+		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* give up after more than 5 tries */
+		if (dp_info->tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+		dp_info->tries++;
+	}
+
+	if (!channel_eq) {
+		DRM_ERROR("channel eq failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+void radeon_dp_link_train(struct drm_encoder *encoder,
+			  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	struct radeon_dp_link_train_info dp_info;
+	int index;
+	u8 tmp, frev, crev;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+	dig = radeon_encoder->enc_priv;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+		return;
+
+	/* DPEncoderService newer than 1.1 can't properly program the
+	 * training pattern. When facing such a version, use
+	 * DIGxEncoderControl (X == 1 or 2) instead.
+	 */
+	dp_info.use_dpencoder = true;
+	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+		if (crev > 1) {
+			dp_info.use_dpencoder = false;
+		}
+	}
+
+	dp_info.enc_id = 0;
+	if (dig->dig_encoder)
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+	if (dig->linkb)
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
+
+	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
+	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+		dp_info.tp3_supported = true;
+	else
+		dp_info.tp3_supported = false;
+
+	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
+	dp_info.rdev = rdev;
+	dp_info.encoder = encoder;
+	dp_info.connector = connector;
+	dp_info.radeon_connector = radeon_connector;
+	dp_info.dp_lane_count = dig_connector->dp_lane_count;
+	dp_info.dp_clock = dig_connector->dp_clock;
+
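+	/* every failure path below lands on done, so _finish() always
+	 * runs and the sink's training-pattern register is always cleared
+	 */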
+	if (radeon_dp_link_train_init(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_cr(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_ce(&dp_info))
+		goto done;
+done:
+	if (radeon_dp_link_train_finish(&dp_info))
+		return;
+}


Property changes on: trunk/sys/dev/drm2/radeon/atombios_dp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/atombios_encoders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/atombios_encoders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atombios_encoders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2667 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atombios_encoders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h" /* Declares several prototypes; clang is pleased. */
+#include "atom.h"
+
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+	u8 backlight_level;
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+			   ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+				       u8 backlight_level)
+{
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+			   ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return 0;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_encoder *encoder = &radeon_encoder->base;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dig *dig;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    radeon_encoder->enc_priv) {
+		dig = radeon_encoder->enc_priv;
+		dig->backlight_level = level;
+		radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+			if (dig->backlight_level == 0) {
+				args.ucAction = ATOM_LCD_BLOFF;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			} else {
+				args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->backlight_level == 0)
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+	return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+	.get_brightness = radeon_atom_backlight_get_brightness,
+	.update_status	= radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	struct radeon_encoder_atom_dig *dig;
+	u8 backlight_level;
+	char bl_name[16];
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = malloc(sizeof(struct radeon_backlight_privdata), DRM_MEM_DRIVER, M_NOWAIT);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+				       pdata, &radeon_atom_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+
+	dig = radeon_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon atom DIG backlight initialized\n");
+
+	return;
+
+error:
+	free(pdata, DRM_MEM_DRIVER);
+	return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct radeon_legacy_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		free(pdata, DRM_MEM_DRIVER);
+
+		DRM_INFO("radeon atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode);
+#endif
+
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* hw bug */
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	/* get the native mode for TV */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		if (tv_dac) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M)
+				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+			else
+				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+		}
+	}
+
+	if (ASIC_IS_DCE3(rdev) &&
+	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	     (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, adjusted_mode);
+	}
+
+	return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+		break;
+	}
+
+	args.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_PS2;
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_PAL:
+		case TV_STD_PAL_M:
+		case TV_STD_SCART_PAL:
+		case TV_STD_SECAM:
+		case TV_STD_PAL_CN:
+			args.ucDacStandard = ATOM_DAC1_PAL;
+			break;
+		case TV_STD_NTSC:
+		case TV_STD_NTSC_J:
+		case TV_STD_PAL_60:
+		default:
+			args.ucDacStandard = ATOM_DAC1_NTSC;
+			break;
+		}
+	}
+	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	TV_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+	args.sTVEncoder.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_NTSC:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+			break;
+		default:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		}
+	}
+
+	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 8;
+
+	if (connector)
+		bpc = radeon_get_monitor_bpc(connector);
+
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+
+union dvo_encoder_control {
+	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	union dvo_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+	uint8_t frev, crev;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* some R4xx chips have the wrong frev */
+	if (rdev->family <= CHIP_RV410)
+		frev = 1;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			/* R4xx, R5xx */
+			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			break;
+		case 2:
+			/* RS600/690/740 */
+			args.dvo.sDVOEncoder.ucAction = action;
+			args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			/* DFP1, CRT1, TV1 depending on the type of port */
+			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+			break;
+		case 3:
+			/* R6xx */
+			args.dvo_v3.ucAction = action;
+			args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.dvo_v3.ucDVOConfig = 0; /* XXX */
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
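
Every setup routine in this file follows the AtomBIOS calling convention just shown: look up the command-table index, ask the BIOS which revision of the parameter block it carries, fill the matching member of a union, and execute the table. A minimal sketch of the pattern; the FooControl table and FOO_* argument structs are hypothetical stand-ins, not real AtomBIOS names:

/* Sketch only: FooControl and the FOO_* layouts are made up. */
union foo_control {
	FOO_CONTROL_PS_ALLOCATION    v1;
	FOO_CONTROL_PS_ALLOCATION_V2 v2;
};

static void
atombios_foo_setup(struct radeon_device *rdev, int action)
{
	union foo_control args;
	int index = GetIndexIntoMasterTable(COMMAND, FooControl);
	uint8_t frev, crev;

	memset(&args, 0, sizeof(args));

	/* the BIOS reports which revision of the table it carries */
	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (crev) {
	case 1:
		args.v1.ucAction = action;
		break;
	case 2:
		args.v2.ucAction = action;
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}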
+
+union lvds_encoder_control {
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	union lvds_encoder_control args;
+	int index = 0;
+	int hdmi_detected = 0;
+	uint8_t frev, crev;
+
+	if (!dig)
+		return;
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+		hdmi_detected = 1;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+	case 2:
+		switch (crev) {
+		case 1:
+			args.v1.ucMisc = 0;
+			args.v1.ucAction = action;
+			if (hdmi_detected)
+				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			} else {
+				if (dig->linkb)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				/*if (pScrn->rgbBits == 8) */
+				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			}
+			break;
+		case 2:
+		case 3:
+			args.v2.ucMisc = 0;
+			args.v2.ucAction = action;
+			if (crev == 3) {
+				if (dig->coherent_mode)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+			}
+			if (hdmi_detected)
+				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v2.ucTruncate = 0;
+			args.v2.ucSpatial = 0;
+			args.v2.ucTemporal = 0;
+			args.v2.ucFRC = 0;
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+				}
+				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+				}
+			} else {
+				if (dig->linkb)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	/* dp bridges are always DP */
+	if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+		return ATOM_ENCODER_MODE_DP;
+
+	/* DVO is always DVO */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+		return ATOM_ENCODER_MODE_DVO;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		    radeon_audio)
+			return ATOM_ENCODER_MODE_HDMI;
+		else if (radeon_connector->use_digital)
+			return ATOM_ENCODER_MODE_DVI;
+		else
+			return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	default:
+		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		    radeon_audio)
+			return ATOM_ENCODER_MODE_HDMI;
+		else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			 radeon_audio)
+			return ATOM_ENCODER_MODE_HDMI;
+		else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		return ATOM_ENCODER_MODE_DP;
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
+		return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		/* fix me */
+		return ATOM_ENCODER_MODE_TV;
+		/*return ATOM_ENCODER_MODE_CV;*/
+		break;
+	}
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0/6.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * llano
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ * ontario
+ * DIG1 drives UNIPHY0/1/2 link A
+ * DIG2 drives UNIPHY0/1/2 link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
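
On DCE 4.0/5.0/6.0 the DIG-to-PHY wiring described above is hardcoded, so it can be read as a plain lookup. A sketch of that mapping; this table is illustrative only and is not something the driver actually defines:

/* index = DIG block number - 1; link A also carries dual-link A+B */
static const struct {
	int uniphy;	/* UNIPHY0/1/2 */
	char link;	/* 'A' or 'B' */
} dce4_dig_to_phy[6] = {
	{ 0, 'A' },	/* DIG1 -> UNIPHY0 link A (or A+B) */
	{ 0, 'B' },	/* DIG2 -> UNIPHY0 link B */
	{ 1, 'A' },	/* DIG3 -> UNIPHY1 link A (or A+B) */
	{ 1, 'B' },	/* DIG4 -> UNIPHY1 link B */
	{ 2, 'A' },	/* DIG5 -> UNIPHY2 link A (or A+B) */
	{ 2, 'B' },	/* DIG6 -> UNIPHY2 link B */
};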
+
+union dig_encoder_control {
+	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	union dig_encoder_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE4(rdev))
+		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+	else {
+		if (dig->dig_encoder)
+			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v1.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.ucLaneNum = 8;
+			else
+				args.v1.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+				break;
+			}
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+			break;
+		case 2:
+		case 3:
+			args.v3.ucAction = action;
+			args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+			args.v3.acConfig.ucDigSel = dig->dig_encoder;
+			args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v4.ucPanelMode = panel_mode;
+			else
+				args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+			}
+			args.v4.acConfig.ucDigSel = dig->dig_encoder;
+			args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v4.ucHPD_ID = 0;
+			else
+				args.v4.ucHPD_ID = hpd_id + 1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dig_transmitter_control {
+	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	union dig_transmitter_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	bool is_dp = false;
+	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
+	int dig_encoder = dig->dig_encoder;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+		connector = radeon_get_connector_for_encoder_init(encoder);
+		/* just needed to avoid bailing in the encoder check.  the encoder
+		 * isn't used for init
+		 */
+		dig_encoder = 0;
+	} else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		hpd_id = radeon_connector->hpd.hpd;
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		igp_lane_info = dig_connector->igp_lane_info;
+	}
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		pll_id = radeon_crtc->pll_id;
+	}
+
+	/* no dig encoder assigned */
+	if (dig_encoder == -1)
+		return;
+
+	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+		is_dp = true;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v1.asMode.ucLaneSel = lane_num;
+				args.v1.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+			if (dig_encoder)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+				if (is_dp ||
+				    !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
+					if (igp_lane_info & 0x1)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+					else if (igp_lane_info & 0x2)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+					else if (igp_lane_info & 0x4)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+					else if (igp_lane_info & 0x8)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+				} else {
+					if (igp_lane_info & 0x3)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+					else if (igp_lane_info & 0xc)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+				}
+			}
+
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+			if (is_dp)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+			}
+			break;
+		case 2:
+			args.v2.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v2.asMode.ucLaneSel = lane_num;
+				args.v2.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v2.acConfig.ucEncoderSel = dig_encoder;
+			if (dig->linkb)
+				args.v2.acConfig.ucLinkSel = 1;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v2.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v2.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v2.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp) {
+				args.v2.acConfig.fCoherentMode = 1;
+				args.v2.acConfig.fDPConnector = 1;
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v2.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 3:
+			args.v3.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v3.asMode.ucLaneSel = lane_num;
+				args.v3.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v3.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE4, if there is an external clock, it generates the DP ref clock */
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v3.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v3.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v3.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v3.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v4.asMode.ucLaneSel = lane_num;
+				args.v4.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v4.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v4.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE5 DCPLL usually generates the DP ref clock */
+			if (is_dp) {
+				if (rdev->clock.dp_extclk)
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+				else
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+			} else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v4.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v4.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v4.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v4.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 5:
+			args.v5.ucAction = action;
+			if (is_dp)
+				args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
+			else
+				args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+				break;
+			}
+			if (is_dp)
+				args.v5.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v5.ucLaneNum = 8;
+			else
+				args.v5.ucLaneNum = 4;
+			args.v5.ucConnObjId = connector_object_id;
+			args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
+
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
+			else
+				args.v5.asConfig.ucPhyClkSrcId = pll_id;
+
+			if (is_dp)
+				args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v5.asConfig.ucCoherentMode = 1;
+			}
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v5.asConfig.ucHPDSel = 0;
+			else
+				args.v5.asConfig.ucHPDSel = hpd_id + 1;
+			args.v5.ucDigEncoderSel = 1 << dig_encoder;
+			args.v5.ucDPLaneSet = lane_set;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union dig_transmitter_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+	uint8_t frev, crev;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+		goto done;
+
+	if (!ASIC_IS_DCE4(rdev))
+		goto done;
+
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+		goto done;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		goto done;
+
+	memset(&args, 0, sizeof(args));
+
+	args.v1.ucAction = action;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* wait for the panel to power up */
+	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+		int i;
+
+		for (i = 0; i < 300; i++) {
+			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+				return true;
+			mdelay(1);
+		}
+		return false;
+	}
+done:
+	return true;
+}
+
+union external_encoder_control {
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+				struct drm_encoder *ext_encoder,
+				int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+	union external_encoder_control args;
+	struct drm_connector *connector;
+	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+	u8 frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		/* no params on frev 1 */
+		break;
+	case 2:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.sDigEncoder.ucAction = action;
+			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.sDigEncoder.ucLaneNum = 8;
+			else
+				args.v1.sDigEncoder.ucLaneNum = 4;
+			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	ENABLE_YUV_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+	uint32_t temp, reg;
+
+	memset(&args, 0, sizeof(args));
+
+	if (rdev->family >= CHIP_R600)
+		reg = R600_BIOS_3_SCRATCH;
+	else
+		reg = RADEON_BIOS_3_SCRATCH;
+
+	/* XXX: fix up scratch reg handling */
+	temp = RREG32(reg);
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+			     (radeon_crtc->crtc_id << 18)));
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+	else
+		WREG32(reg, 0);
+
+	if (enable)
+		args.ucEnable = ATOM_ENABLE;
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	WREG32(reg, temp);
+}
+
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+		break;
+	default:
+		return;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		args.ucAction = ATOM_ENABLE;
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLON;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		args.ucAction = ATOM_DISABLE;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLOFF;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_connector *radeon_connector = NULL;
+	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+
+	if (connector) {
+		radeon_connector = to_radeon_connector(connector);
+		radeon_dig_connector = radeon_connector->con_priv;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			if (!connector)
+				dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+			else
+				dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+			atombios_dig_encoder_setup(encoder,
+						   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+						   dig->panel_mode);
+			if (ext_encoder) {
+				if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+					atombios_external_encoder_setup(encoder, ext_encoder,
+									EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+			}
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+			/* enable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+		} else {
+			/* setup and enable the encoder and transmitter */
+			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			/* some early dce3.2 boards have a bug in their transmitter control table */
+			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+				radeon_dig_connector->edp_on = true;
+			}
+			radeon_dp_link_train(encoder, connector);
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		} else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+				radeon_dig_connector->edp_on = false;
+			}
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+			     struct drm_encoder *ext_encoder,
+			     int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	default:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+		  radeon_encoder->active_device);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		radeon_atom_encoder_dpms_dig(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dvo_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dvo_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else if (ASIC_IS_DCE3(rdev))
+			radeon_atom_encoder_dpms_dig(encoder, mode);
+		else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dac_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dac_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	default:
+		return;
+	}
+
+	if (ext_encoder)
+		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
+	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+union crtc_source_param {
+	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	union crtc_source_param args;
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *dig;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.v1.ucCRTC = radeon_crtc->crtc_id;
+			else {
+				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+					args.v1.ucCRTC = radeon_crtc->crtc_id;
+				} else {
+					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+				}
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+			case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+				break;
+			}
+			break;
+		case 2:
+			args.v2.ucCRTC = radeon_crtc->crtc_id;
+			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			} else
+				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = radeon_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* update scratch regs with new routing */
+	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	/* Funky macbooks */
+	if ((dev->pci_device == 0x71C5) &&
+	    (dev->pci_subvendor == 0x106b) &&
+	    (dev->pci_subdevice == 0x0080)) {
+		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+		}
+	}
+
+	/* set scaler clears this on some chips */
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
+	}
+}
+
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *test_encoder;
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t dig_enc_in_use = 0;
+
+	if (ASIC_IS_DCE6(rdev)) {
+		/* DCE6 */
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			if (dig->linkb)
+				return 1;
+			else
+				return 0;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			if (dig->linkb)
+				return 3;
+			else
+				return 2;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->linkb)
+				return 5;
+			else
+				return 4;
+			break;
+		}
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* DCE4/5 */
+		if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
+			/* ontario follows DCE4 */
+			if (rdev->family == CHIP_PALM) {
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+			} else
+				/* llano follows DCE3.2 */
+				return radeon_crtc->crtc_id;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					return 3;
+				else
+					return 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					return 5;
+				else
+					return 4;
+				break;
+			}
+		}
+	}
+
+	/* on DCE32 an encoder can drive any block, so just use the crtc id */
+	if (ASIC_IS_DCE32(rdev)) {
+		return radeon_crtc->crtc_id;
+	}
+
+	/* on DCE3 - LVTMA can only be driven by DIGB */
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_test_encoder;
+
+		if (encoder == test_encoder)
+			continue;
+
+		if (!radeon_encoder_is_digital(test_encoder))
+			continue;
+
+		radeon_test_encoder = to_radeon_encoder(test_encoder);
+		dig = radeon_test_encoder->enc_priv;
+
+		if (dig->dig_encoder >= 0)
+			dig_enc_in_use |= (1 << dig->dig_encoder);
+	}
+
+	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+		if (dig_enc_in_use & 0x2)
+			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+		return 1;
+	}
+	if (!(dig_enc_in_use & 1))
+		return 0;
+	return 1;
+}
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+			break;
+		default:
+			break;
+		}
+
+		if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+	}
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+	/* need to call this here rather than in prepare() since we need some crtc info */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+			atombios_yuv_setup(encoder, true);
+		else
+			atombios_yuv_setup(encoder, false);
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_ENABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				atombios_tv_setup(encoder, ATOM_ENABLE);
+			else
+				atombios_tv_setup(encoder, ATOM_DISABLE);
+		}
+		break;
+	}
+
+	atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		r600_hdmi_enable(encoder);
+		if (ASIC_IS_DCE6(rdev))
+			; /* TODO (use pointers instead of if-s?) */
+		else if (ASIC_IS_DCE4(rdev))
+			evergreen_hdmi_setmode(encoder, adjusted_mode);
+		else
+			r600_hdmi_setmode(encoder, adjusted_mode);
+	}
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+				       ATOM_DEVICE_CV_SUPPORT |
+				       ATOM_DEVICE_CRT_SUPPORT)) {
+		DAC_LOAD_DETECTION_PS_ALLOCATION args;
+		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+		uint8_t frev, crev;
+
+		memset(&args, 0, sizeof(args));
+
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+			return false;
+
+		args.sDacload.ucMisc = 0;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+			args.sDacload.ucDacType = ATOM_DAC_A;
+		else
+			args.sDacload.ucDacType = ATOM_DAC_B;
+
+		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		}
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		return true;
+	} else
+		return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	uint32_t bios_0_scratch;
+
+	if (!atombios_dac_load_detect(encoder, connector)) {
+		DRM_DEBUG_KMS("detect returned false \n");
+		return connector_status_unknown;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+	else
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	u32 bios_0_scratch;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return connector_status_unknown;
+
+	if (!ext_encoder)
+		return connector_status_unknown;
+
+	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+		return connector_status_unknown;
+
+	/* load detect on the dp bridge */
+	atombios_external_encoder_setup(encoder, ext_encoder,
+					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	if (ext_encoder)
+		/* ddc_setup on the dp bridge */
+		atombios_external_encoder_setup(encoder, ext_encoder,
+						EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if ((radeon_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig) {
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+				if (rdev->family >= CHIP_R600)
+					dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+				else
+					/* RS600/690/740 have only 1 afmt block */
+					dig->afmt = rdev->mode_info.afmt[0];
+			}
+		}
+	}
+
+	radeon_atom_output_lock(encoder, true);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+	/* need to call this here as we need the crtc set up */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	radeon_atom_output_lock(encoder, false);
+}
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+			atombios_tv_setup(encoder, ATOM_DISABLE);
+		break;
+	}
+
+disable_done:
+	if (radeon_encoder_is_digital(encoder)) {
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+			r600_hdmi_disable(encoder);
+		dig = radeon_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
+	radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+	.dpms = radeon_atom_ext_dpms,
+	.mode_fixup = radeon_atom_ext_mode_fixup,
+	.prepare = radeon_atom_ext_prepare,
+	.mode_set = radeon_atom_ext_mode_set,
+	.commit = radeon_atom_ext_commit,
+	.disable = radeon_atom_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.disable = radeon_atom_encoder_disable,
+	.detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_atom_backlight_exit(radeon_encoder);
+	free(radeon_encoder->enc_priv, DRM_MEM_DRIVER);
+	drm_encoder_cleanup(encoder);
+	free(radeon_encoder, DRM_MEM_DRIVER);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dac *dac = malloc(sizeof(struct radeon_encoder_atom_dac),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!dac)
+		return NULL;
+
+	dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	return dac;
+}
+
+static struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	struct radeon_encoder_atom_dig *dig = malloc(sizeof(struct radeon_encoder_atom_dig),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!dig)
+		return NULL;
+
+	/* coherent mode by default */
+	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
+
+	if (encoder_enum == 2)
+		dig->linkb = true;
+	else
+		dig->linkb = false;
+
+	return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	radeon_encoder = malloc(sizeof(struct radeon_encoder),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	switch (rdev->num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+	radeon_encoder->underscan_type = UNDERSCAN_OFF;
+	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		radeon_encoder->is_ext_encoder = true;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		else
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+		break;
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/atombios_encoders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
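
A note on the encoder helpers registered above: the drm_crtc_helper core
drives these hooks in a fixed order during a full mode set, which is why
radeon_atom_encoder_mode_set() forces DPMS_OFF itself and
radeon_atom_encoder_commit() only turns the encoder back on once the crtc
is programmed.  A minimal sketch of that sequence (the example function is
hypothetical; the helper-funcs pointer matches the drm code elsewhere in
this commit):

    /* Hypothetical illustration, not part of the diff: the order in
     * which the drm_crtc_helper core invokes the hooks wired up above. */
    static void example_encoder_modeset(struct drm_encoder *encoder,
                                        struct drm_display_mode *mode,
                                        struct drm_display_mode *adjusted_mode)
    {
            const struct drm_encoder_helper_funcs *f = encoder->helper_private;

            f->prepare(encoder);    /* lock output, pick DIG block, set crtc source */
            f->mode_set(encoder, mode, adjusted_mode); /* dpms off + atom tables */
            f->commit(encoder);     /* dpms on now that the crtc is set, unlock */
    }
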
Added: trunk/sys/dev/drm2/radeon/atombios_i2c.c
===================================================================
--- trunk/sys/dev/drm2/radeon/atombios_i2c.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/atombios_i2c.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,211 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/atombios_i2c.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include <dev/iicbus/iicbus.h>
+#include "radeon.h"
+#include "atom.h"
+#include "iicbus_if.h"
+#include "iicbb_if.h"
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are a limitation of ProcessI2cChannelTransaction, not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_READ  255
+
+static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+				 u8 slave_addr, u8 flags,
+				 u8 *buf, u8 num)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+	unsigned char *base;
+	u16 out;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+	if (flags & HW_I2C_WRITE) {
+		if (num > ATOM_MAX_HW_I2C_WRITE) {
+			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+			return -EINVAL;
+		}
+		memcpy(&out, buf, num);
+		args.lpI2CDataOut = cpu_to_le16(out);
+	} else {
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+		if (num > ATOM_MAX_HW_I2C_READ) {
+			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+			return -EINVAL;
+		}
+#endif
+	}
+
+	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+	args.ucRegIndex = 0;
+	args.ucTransBytes = num;
+	args.ucSlaveAddr = slave_addr << 1;
+	args.ucLineNumber = chan->rec.i2c_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* error */
+	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+		DRM_DEBUG_KMS("hw_i2c error\n");
+		return -EIO;
+	}
+
+	if (!(flags & HW_I2C_WRITE))
+		memcpy(buf, base, num);
+
+	return 0;
+}
+
+static int
+radeon_atom_hw_i2c_xfer(device_t dev, struct iic_msg *msgs, u_int num)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct iic_msg *p;
+	int i, remaining, current_count, buffer_offset, max_bytes, ret;
+	u8 buf = 0, flags;
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		ret = radeon_process_i2c_ch(i2c,
+					    p->slave, HW_I2C_WRITE,
+					    &buf, 1);
+		if (ret)
+			return -ret; /* "ret" is returned on Linux. */
+		else
+			return (0); /* "num" is returned on Linux. */
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		/* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
+		if (p->flags & IIC_M_RD) {
+			max_bytes = ATOM_MAX_HW_I2C_READ;
+			flags = HW_I2C_READ;
+		} else {
+			max_bytes = ATOM_MAX_HW_I2C_WRITE;
+			flags = HW_I2C_WRITE;
+		}
+		while (remaining) {
+			if (remaining > max_bytes)
+				current_count = max_bytes;
+			else
+				current_count = remaining;
+			ret = radeon_process_i2c_ch(i2c,
+						    p->slave, flags,
+						    &p->buf[buffer_offset], current_count);
+			if (ret)
+				return -ret; /* "ret" is returned on Linux. */
+			remaining -= current_count;
+			buffer_offset += current_count;
+		}
+	}
+
+	return (0); /* "num" is returned on Linux. */
+}
+
+static int
+radeon_atom_hw_i2c_probe(device_t dev)
+{
+
+	return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+radeon_atom_hw_i2c_attach(device_t dev)
+{
+	struct radeon_i2c_chan *i2c;
+	device_t iic_dev;
+
+	i2c = device_get_softc(dev);
+	device_set_desc(dev, i2c->name);
+
+	/* add generic bit-banging code */
+	iic_dev = device_add_child(dev, "iicbus", -1);
+	if (iic_dev == NULL)
+		return (ENXIO);
+	device_quiet(iic_dev);
+
+	/* attach and probe added child */
+	bus_generic_attach(dev);
+
+	return (0);
+}
+
+static int
+radeon_atom_hw_i2c_detach(device_t dev)
+{
+	/* detach bit-banging code. */
+	bus_generic_detach(dev);
+
+	/* delete bit-banging code. */
+	device_delete_children(dev);
+	return (0);
+}
+
+static int
+radeon_atom_hw_i2c_reset(device_t dev, u_char speed,
+    u_char addr, u_char *oldaddr)
+{
+
+	return (0);
+}
+
+static device_method_t radeon_atom_hw_i2c_methods[] = {
+	DEVMETHOD(device_probe,		radeon_atom_hw_i2c_probe),
+	DEVMETHOD(device_attach,	radeon_atom_hw_i2c_attach),
+	DEVMETHOD(device_detach,	radeon_atom_hw_i2c_detach),
+	DEVMETHOD(iicbus_reset,		radeon_atom_hw_i2c_reset),
+	DEVMETHOD(iicbus_transfer,	radeon_atom_hw_i2c_xfer),
+	DEVMETHOD_END
+};
+
+static driver_t radeon_atom_hw_i2c_driver = {
+	"radeon_atom_hw_i2c",
+	radeon_atom_hw_i2c_methods,
+	0
+};
+
+static devclass_t radeon_atom_hw_i2c_devclass;
+DRIVER_MODULE_ORDERED(radeon_atom_hw_i2c, drmn, radeon_atom_hw_i2c_driver,
+    radeon_atom_hw_i2c_devclass, 0, 0, SI_ORDER_ANY);


Property changes on: trunk/sys/dev/drm2/radeon/atombios_i2c.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
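
The hw i2c transfer routine above works around the AtomBIOS limits noted
in the file: ProcessI2cChannelTransaction takes at most 2 bytes per write
and 255 per read, so radeon_atom_hw_i2c_xfer() re-issues the table once
per chunk.  A hedged sketch of the chunking arithmetic (the 100-byte
message size is invented for illustration):

    /* Illustration only: a 100-byte write becomes
     * ceil(100 / ATOM_MAX_HW_I2C_WRITE) = 50 table executions. */
    int remaining = 100, offset = 0;
    while (remaining) {
            int count = (remaining > ATOM_MAX_HW_I2C_WRITE) ?
                ATOM_MAX_HW_I2C_WRITE : remaining;
            /* radeon_process_i2c_ch(i2c, slave, HW_I2C_WRITE,
             *     &buf[offset], count); */
            remaining -= count;
            offset += count;
    }
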
Added: trunk/sys/dev/drm2/radeon/avivod.h
===================================================================
--- trunk/sys/dev/drm2/radeon/avivod.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/avivod.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,66 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef AVIVOD_H
+#define AVIVOD_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/avivod.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+
+#define	D1CRTC_CONTROL					0x6080
+#define		CRTC_EN						(1 << 0)
+#define	D1CRTC_STATUS					0x609c
+#define	D1CRTC_UPDATE_LOCK				0x60E8
+#define	D1GRPH_PRIMARY_SURFACE_ADDRESS			0x6110
+#define	D1GRPH_SECONDARY_SURFACE_ADDRESS		0x6118
+
+#define	D2CRTC_CONTROL					0x6880
+#define	D2CRTC_STATUS					0x689c
+#define	D2CRTC_UPDATE_LOCK				0x68E8
+#define	D2GRPH_PRIMARY_SURFACE_ADDRESS			0x6910
+#define	D2GRPH_SECONDARY_SURFACE_ADDRESS		0x6918
+
+#define	D1VGA_CONTROL					0x0330
+#define		DVGA_CONTROL_MODE_ENABLE			(1 << 0)
+#define		DVGA_CONTROL_TIMING_SELECT			(1 << 8)
+#define		DVGA_CONTROL_SYNC_POLARITY_SELECT		(1 << 9)
+#define		DVGA_CONTROL_OVERSCAN_TIMING_SELECT		(1 << 10)
+#define		DVGA_CONTROL_OVERSCAN_COLOR_EN			(1 << 16)
+#define		DVGA_CONTROL_ROTATE				(1 << 24)
+#define D2VGA_CONTROL					0x0338
+
+#define	VGA_HDP_CONTROL					0x328
+#define		VGA_MEM_PAGE_SELECT_EN				(1 << 0)
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+#define		VGA_RBBM_LOCK_DISABLE				(1 << 8)
+#define		VGA_SOFT_RESET					(1 << 16)
+#define	VGA_MEMORY_BASE_ADDRESS				0x0310
+#define	VGA_RENDER_CONTROL				0x0300
+#define		VGA_VSTATUS_CNTL_MASK				0x00030000
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/avivod.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
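
The avivod.h registers above are driven through the same RREG32/WREG32
accessors used throughout this commit, usually as read-modify-write
sequences.  A minimal sketch against VGA_RENDER_CONTROL (RREG32/WREG32
pick up an rdev from the surrounding driver scope, assumed here):

    /* Hedged sketch: stop the VGA engine from scanning out, the usual
     * first step when the KMS driver takes over the display. */
    u32 tmp = RREG32(VGA_RENDER_CONTROL);
    WREG32(VGA_RENDER_CONTROL, tmp & ~VGA_VSTATUS_CNTL_MASK);
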
Added: trunk/sys/dev/drm2/radeon/cayman_blit_shaders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/cayman_blit_shaders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/cayman_blit_shaders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,376 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/cayman_blit_shaders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 cayman_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x00000187,
+	0x00000100, /* SPI_VS_OUT_ID_0 */
+
+	0xc0026900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+	0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0106900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+	0x00000000, /* SPI_GPR_MGMT */
+	0x00000000, /* SPI_LDS_MGMT */
+	0x00000000, /* SPI_STACK_MGMT */
+	0x00000000, /* SPI_WAVE_MGMT_1 */
+	0x00000000, /* SPI_WAVE_MGMT_2 */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+	0x00000000,
+
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 cayman_vs[] =
+{
+	0x00000004,
+	0x80400400,
+	0x0000a03c,
+	0x95000688,
+	0x00004000,
+	0x15000688,
+	0x00000000,
+	0x88000000,
+	0x04000000,
+	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x00020000,
+#else
+	0x00000000,
+#endif
+	0x00000000,
+	0x04000000,
+	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
+	0x00000008,
+#endif
+	0x00000000,
+};
+
+const u32 cayman_ps[] =
+{
+	0x00000004,
+	0xa00c0000,
+	0x00000008,
+	0x80400000,
+	0x00000000,
+	0x95000688,
+	0x00000000,
+	0x88000000,
+	0x00380400,
+	0x00146b10,
+	0x00380000,
+	0x20146b10,
+	0x00380400,
+	0x40146b00,
+	0x80380000,
+	0x60146b00,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);


Property changes on: trunk/sys/dev/drm2/radeon/cayman_blit_shaders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
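
The blit state tables above are raw PM4 command-stream dwords: each
0xc0xxxxxx word is a type-3 packet header.  For example, 0xc0066900
decodes as SET_CONTEXT_REG with a count of 6, i.e. one register-offset
dword followed by six register values, starting at DB_RENDER_CONTROL.
A sketch of the header layout (macro names as used elsewhere in the
radeon driver; treat them as assumptions here):

    /* PM4 type-3 packet header, as the radeon CS code builds it. */
    #define PACKET3(op, n)  ((3u << 30) | (((n) & 0x3fff) << 16) | \
                             (((op) & 0xff) << 8))
    #define PACKET3_SET_CONTEXT_REG 0x69

    /* PACKET3(PACKET3_SET_CONTEXT_REG, 6) == 0xc0066900, the first
     * dword of cayman_default_state above. */
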
Added: trunk/sys/dev/drm2/radeon/cayman_blit_shaders.h
===================================================================
--- trunk/sys/dev/drm2/radeon/cayman_blit_shaders.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/cayman_blit_shaders.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,39 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/cayman_blit_shaders.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_ps_size, cayman_vs_size;
+extern const u32 cayman_default_size;
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/cayman_blit_shaders.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/cayman_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/cayman_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/cayman_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,518 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/cayman_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned cayman_reg_safe_bm[2047] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFEF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0xCFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFDDEFFF, 0xCF3FFFFF, 0xFFFFE00F,
+	0xFEFFFFDF, 0xFFFFFFFF, 0xFFFFFFEF, 0xEFFFFFFF,
+	0xFFFFFFCC, 0xFFFFFFFF, 0xFFFFFFFF, 0xBFFFFFD7,
+	0xFFFBF8FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF7FFE, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFB, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFDF0FFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xC0000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFC3E4, 0xFFFFFFFF, 0x0000FFFF, 0x00000000,
+	0x000CC000, 0x00000000, 0xFF500000, 0x00000000,
+	0x00000E00, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0xFFFFFCF8, 0xFE07FF00,
+	0x3CF1F003, 0xE39E7BCF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xE7020000, 0xDDD898DD, 0x279FA3FD, 0x011FFFF0,
+	0xBFFF0000, 0xEFC3DF87, 0x7BF0F7E1, 0x1EFC3DF8,
+	0xDFBF0F7E, 0xFFFFF7EF, 0xFFFFFFFF, 0x00000000,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xCFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8,
+};


Property changes on: trunk/sys/dev/drm2/radeon/cayman_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen.c
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3869 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "evergreend.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
+static const u32 crtc_offsets[6] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+void evergreen_fini(struct radeon_device *rdev);
+#endif
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
+#endif
+
+void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+			     unsigned *bankh, unsigned *mtaspect,
+			     unsigned *tile_split)
+{
+	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+	switch (*bankw) {
+	default:
+	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
+	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
+	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
+	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
+	}
+	switch (*bankh) {
+	default:
+	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
+	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
+	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
+	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
+	}
+	switch (*mtaspect) {
+	default:
+	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
+	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
+	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
+	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
+	}
+}
+
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+	u16 ctl, v;
+	int err, cap;
+
+	err = pci_find_cap(rdev->dev, PCIY_EXPRESS, &cap);
+	if (err)
+		return;
+
+	cap += PCIER_DEVICE_CTL;
+
+	ctl = pci_read_config(rdev->dev, cap, 2);
+
+	v = (ctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12;
+
+	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+	 * to avoid hangs or performance issues
+	 */
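+	/*
+	 * Encoding sketch, per the PCIe spec: the MAX_READ_REQUEST field
+	 * occupies bits 14:12 of the Device Control register, and a field
+	 * value v selects a request size of 128 << v bytes, so 0..5 map to
+	 * 128..4096 bytes while 6 and 7 are reserved.  A value of 0 is legal
+	 * per spec but is treated as bad here as well; the (2 << 12) write
+	 * below forces a conservative 512-byte maximum read request.
+	 */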
+	if ((v == 0) || (v == 6) || (v == 7)) {
+		ctl &= ~PCIEM_CTL_MAX_READ_REQUEST;
+		ctl |= (2 << 12);
+		pci_write_config(rdev->dev, cap, ctl, 2);
+	}
+}
+
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
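+
+/*
+ * The back-to-back position reads above are the whole trick: if the two
+ * samples differ, the scanout counter is advancing.  The vblank wait loops
+ * below use this as a hang guard, spinning only while the counter is
+ * actually moving.
+ */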
+
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
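+	/* Both the primary and secondary surface address sets are programmed
+	 * with the new base, presumably so the flip takes effect regardless
+	 * of which surface the CRTC is currently scanning out (an assumption;
+	 * the hardware documentation is not quoted here).
+	 */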
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
+
+/* get temperature in millidegrees */
+int evergreen_get_temp(struct radeon_device *rdev)
+{
+	u32 temp, toffset;
+	int actual_temp = 0;
+
+	if (rdev->family == CHIP_JUNIPER) {
+		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+			TOFFSET_SHIFT;
+		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+			TS0_ADC_DOUT_SHIFT;
+
+		if (toffset & 0x100)
+			actual_temp = temp / 2 - (0x200 - toffset);
+		else
+			actual_temp = temp / 2 + toffset;
+
+		actual_temp = actual_temp * 1000;
+
+	} else {
+		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+			ASIC_T_SHIFT;
+
+		if (temp & 0x400)
+			actual_temp = -256;
+		else if (temp & 0x200)
+			actual_temp = 255;
+		else if (temp & 0x100) {
+			actual_temp = temp & 0x1ff;
+			actual_temp |= ~0x1ff;
+		} else
+			actual_temp = temp & 0xff;
+
+		actual_temp = (actual_temp * 1000) / 2;
+	}
+
+	return actual_temp;
+}
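+
+/*
+ * Worked example for the non-Juniper path above (illustrative only): a raw
+ * ASIC_T field of 0x1fe has bit 8 set, so it is sign-extended via
+ * (0x1fe | ~0x1ff) == -2, giving (-2 * 1000) / 2 == -1000 millidegrees,
+ * i.e. -1 C.  A field of 0x50 stays positive: (0x50 * 1000) / 2 == 40000.
+ */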
+
+int sumo_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+	int actual_temp = temp - 49;
+
+	return actual_temp * 1000;
+}
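+
+/*
+ * Illustrative arithmetic for the calibration above: a raw CG_THERMAL_STATUS
+ * field of 0x60 (96) yields (96 - 49) * 1000 == 47000 millidegrees, i.e. 47 C.
+ */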
+
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+	/* low,mid sh/mh */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+	/* high sh/mh */
+	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
+/**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+	/* starting with BTC, there is one state that is used for both
+	 * MH and SH.  The difference is that we always use the high clock index for
+	 * mclk.
+	 */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if (voltage->type == VOLTAGE_SW) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+		}
+
+		/* starting with BTC, there is one state that is used for both
+		 * MH and SH.  The difference is that we always use the high clock index for
+		 * mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			voltage = &rdev->pm.power_state[req_ps_idx].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->vddci == 0xff01)
+			return;
+		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			rdev->pm.current_vddci = voltage->vddci;
+			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+		}
+	}
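+/*
+ * Register safety bitmap, one bit per dword register.  A sketch of the
+ * r600_cs.c convention, stated here as an assumption: for a register
+ * offset reg, the checker tests cayman_reg_safe_bm[reg >> 7] against the
+ * bit 1 << ((reg >> 2) & 31).  A clear bit means userspace may write the
+ * register directly; a set bit means the write needs per-register
+ * validation or is rejected, which is why the table is mostly 0xFFFFFFFF
+ * (default deny).
+ */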
+}
+
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
+	return connected;
+}
+
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = evergreen_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enabled = 0;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS: this avoids
+			 * breaking the aux dp channel on imac and helps (but
+			 * does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * and also avoids interrupt storms during dpms.
+			 */
+			continue;
+		}
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+		enabled |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_enable_hpd(rdev, enabled);
+}
+
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disabled = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			break;
+		default:
+			break;
+		}
+		disabled |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disabled);
+}
+
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+					struct radeon_crtc *radeon_crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *other_mode)
+{
+	u32 tmp;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 2:0:
+	 * first display controller
+	 *  0 - first half of lb (3840 * 2)
+	 *  1 - first 3/4 of lb (5760 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
+	 *  3 - first 1/4 of lb (1920 * 2)
+	 * second display controller
+	 *  4 - second half of lb (3840 * 2)
+	 *  5 - second 3/4 of lb (5760 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
+	 *  7 - last 1/4 of lb (1920 * 2)
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
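+	/* Worked example (illustrative): crtc 0 paired with an active crtc 1
+	 * gets tmp = 0 (first half: 3840 * 2 on DCE4, 4096 * 2 on DCE5) and
+	 * crtc 1 gets tmp = 0 + 4 (second half); a lone enabled crtc 0 gets
+	 * tmp = 2, the whole line buffer.
+	 */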
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode)
+			tmp = 0; /* 1/2 */
+		else
+			tmp = 2; /* whole */
+	} else
+		tmp = 0;
+
+	/* second controller of the pair uses second half of the lb */
+	if (radeon_crtc->crtc_id % 2)
+		tmp += 4;
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	}
+}
+
+struct evergreen_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
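+
+/*
+ * Numeric sketch of the fixed-point math above, with illustrative numbers
+ * and treating each channel as 4 bytes wide (as the "* 4" suggests):
+ * yclk = 1000000 (kHz) and 2 dram channels give roughly
+ * (1000000 / 1000) * (2 * 4) * 0.7 == 5600, i.e. MHz * bytes ~= MB/s.
+ */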
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, disp_clk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = evergreen_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
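+	/* Large vertical scale factors, many scaler taps or interlaced
+	 * scan-out need up to 4 source lines buffered per destination
+	 * line; 2 lines suffice otherwise.
+	 */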
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
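+	/* Line buffer fill rate: the smaller of this head's share of the
+	 * available bandwidth and dispclk * bytes_per_pixel.
+	 */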
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
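+	/* Time to fill the worst-case number of source lines at lb_fill_bw. */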
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (evergreen_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct evergreen_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 pipe_offset = radeon_crtc->crtc_id * 16;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !evergreen_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
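+		/* Convert the watermark (ns) to pixels at the pixel clock,
+		 * scale by the horizontal scale ratio, and express the
+		 * result in 16-pixel units for the priority mark.
+		 */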
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+}
+
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* poll the MC busy bits in SRBM_STATUS */
+		tmp = RREG32(SRBM_STATUS) & 0x1F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+/*
+ * GART
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* poll the TLB flush request response */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			DRM_ERROR("[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	if (rdev->flags & RADEON_IS_IGP) {
+		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+	} else {
+		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+		if ((rdev->family == CHIP_JUNIPER) ||
+		    (rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK) ||
+		    (rdev->family == CHIP_BARTS))
+			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	}
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	evergreen_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+	evergreen_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+static void evergreen_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+}
+
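+/* Blank the CRTCs and black out the memory controller so the MC can be
+ * re-programmed while nothing scans out of VRAM; evergreen_mc_resume()
+ * reverses this.
+ */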
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
+	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(VGA_RENDER_CONTROL, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				}
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				}
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout &= ~BLACKOUT_MODE_MASK;
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x3) != 0) {
+				tmp &= ~0x3;
+				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp &= ~BLACKOUT_MODE_MASK;
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
+	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+void evergreen_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+	/* Lock out access through the VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	/* llano/ontario only */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+	}
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
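+	/* next_rptr must skip this packet (3 or 5 dwords) plus the 4-dword
+	 * INDIRECT_BUFFER packet emitted below.
+	 */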
+	if (ring->rptr_save_reg) {
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
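+	/* start from the current HPD/AFMT state with the enable bits cleared */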
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
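+	/* writing CP_ME_CNTL with the HALT bits clear starts the micro engine */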
+	cp_me = 0xff;
+	WREG32(CP_ME_CNTL, cp_me);
+
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < evergreen_default_size; i++)
+		radeon_ring_write(ring, evergreen_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	return 0;
+}
+
+static int evergreen_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
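+	/* RB_BUFSZ and RB_BLKSZ are log2 sizes in 8-byte quadwords */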
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	ring->rptr = RREG32(CP_RB_RPTR);
+
+	evergreen_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+	return 0;
+}
+
+/*
+ * Core functions
+ */
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 sq_config;
+	u32 sq_lds_resource_mgmt;
+	u32 sq_gpr_resource_mgmt_1;
+	u32 sq_gpr_resource_mgmt_2;
+	u32 sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt;
+	u32 sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1;
+	u32 sq_stack_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_3;
+	u32 vgt_cache_invalidation;
+	u32 hdp_host_path_cntl, tmp;
+	u32 disabled_rb_mask;
+	int i, j, num_shader_engines, ps_thread_count;
+
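+	/* per-family shader engine, pipe, and FIFO limits */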
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_JUNIPER:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_REDWOOD:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CEDAR:
+	default:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PALM:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		if (rdev->ddev->pci_device == 0x9648)
+			rdev->config.evergreen.max_simds = 3;
+		else if ((rdev->ddev->pci_device == 0x9647) ||
+			 (rdev->ddev->pci_device == 0x964a))
+			rdev->config.evergreen.max_simds = 4;
+		else
+			rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO2:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_BARTS:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 7;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_TURKS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 6;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CAICOS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.evergreen.tile_config = 0;
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.evergreen.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.evergreen.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.evergreen.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.evergreen.tile_config |= (3 << 0);
+		break;
+	}
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.evergreen.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.evergreen.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.evergreen.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.evergreen.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.evergreen.tile_config |= 0 << 8;
+	rdev->config.evergreen.tile_config |=
+		((gb_addr_config & 0x30000000) >> 28) << 12;
+
+	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+
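+	/* Derive the disabled render-backend mask: from the RCU efuse straps
+	 * on Evergreen proper, or per shader engine from CC_RB_BACKEND_DISABLE
+	 * on newer families.
+	 */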
+	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		tmp = (((efuse_straps_4 & 0xf) << 4) |
+		      ((efuse_straps_3 & 0xf0000000) >> 28));
+	} else {
+		tmp = 0;
+		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+			u32 rb_disable_bitmap;
+
+			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+			tmp <<= 4;
+			tmp |= rb_disable_bitmap;
+		}
+	}
+	/* the enabled RBs are simply the ones not disabled :) */
+	disabled_rb_mask = tmp;
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
+
+	if ((rdev->config.evergreen.max_backends == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+			     SYNC_GRADIENT |
+			     SYNC_WALKER |
+			     SYNC_ALIGNER));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family <= CHIP_SUMO2)
+		WREG32(SMX_SAR_CTL0, 0x00010000);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(SPI_CONFIG_CNTL, 0);
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		ps_thread_count = 96;
+		break;
+	default:
+		ps_thread_count = 128;
+		break;
+	}
+
+	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+		break;
+	default:
+		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+		break;
+	}
+	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR8_BASE, 0);
+	WREG32(CB_COLOR9_BASE, 0);
+	WREG32(CB_COLOR10_BASE, 0);
+	WREG32(CB_COLOR11_BASE, 0);
+
+	/* set the shader const cache sizes to 0 */
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+		WREG32(i, 0);
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+		WREG32(i, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		tmp = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	/* Setup GPU memory space */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		/* size in bytes on fusion */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	} else {
+		/* size in MB on evergreen/cayman/tn */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	}
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status_se0, grbm_status_se1;
+
+	srbm_status = RREG32(SRBM_STATUS);
+	grbm_status = RREG32(GRBM_STATUS);
+	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+	if (!(grbm_status & GUI_ACTIVE)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
+{
+	u32 grbm_reset = 0;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return;
+
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	/* reset all the gfx blocks */
+	grbm_reset = (SOFT_RESET_CP |
+		      SOFT_RESET_CB |
+		      SOFT_RESET_DB |
+		      SOFT_RESET_PA |
+		      SOFT_RESET_SC |
+		      SOFT_RESET_SPI |
+		      SOFT_RESET_SH |
+		      SOFT_RESET_SX |
+		      SOFT_RESET_TC |
+		      SOFT_RESET_TA |
+		      SOFT_RESET_VC |
+		      SOFT_RESET_VGT);
+
+	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+	WREG32(GRBM_SOFT_RESET, grbm_reset);
+	(void)RREG32(GRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(GRBM_SOFT_RESET, 0);
+	(void)RREG32(GRBM_SOFT_RESET);
+
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		reset_mask &= ~RADEON_RESET_DMA;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		evergreen_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		evergreen_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+	return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					       RADEON_RESET_COMPUTE |
+					       RADEON_RESET_DMA));
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc >= rdev->num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0,
+					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+		cayman_cp_int_cntl_setup(rdev, 1, 0);
+		cayman_cp_int_cntl_setup(rdev, 2, 0);
+		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		WREG32(CAYMAN_DMA1_CNTL, tmp);
+	} else
+		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	/* only one DAC on DCE6 */
+	if (!ASIC_IS_DCE6(rdev))
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+	u32 dma_cntl, dma_cntl1 = 0;
+
+	if (!rdev->irq.installed) {
+		dev_warn(rdev->dev, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		evergreen_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* enable CP interrupts on all rings */
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+		}
+	} else {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= RB_INT_ENABLE;
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			dma_cntl1 |= TRAP_ENABLE;
+		}
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    atomic_read(&rdev->irq.pflip[2])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    atomic_read(&rdev->irq.pflip[3])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    atomic_read(&rdev->irq.pflip[4])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    atomic_read(&rdev->irq.pflip[5])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[2]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[3]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[4]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[5]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+	} else
+		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_CAYMAN)
+		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+	}
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
+	return 0;
+}
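
The function above follows a stage-then-commit pattern: each control register is read with its enable bit masked off, the bit is OR-ed back only for sources the core requested, and all staged values are written at the end. A minimal stand-alone sketch of that read-modify-write idiom, using a fake register variable in place of the RREG32/WREG32 accessors:

#include <stdio.h>

#define INT_EN (1u << 16)	/* stand-in for an enable bit such as DC_HPDx_INT_EN */

int main(void)
{
	unsigned reg = INT_EN | 0xff;	/* fake MMIO register: enabled, other fields set */
	int want_irq = 0;		/* what the core requested for this source */

	/* stage: strip the enable bit but preserve every other field */
	unsigned tmp = reg & ~INT_EN;
	if (want_irq)
		tmp |= INT_EN;		/* re-arm only if requested */

	/* commit: a single write, leaving unrelated fields untouched */
	reg = tmp;
	printf("reg = 0x%08x\n", reg);	/* 0x000000ff: disabled, fields preserved */
	return 0;
}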
+
+static void evergreen_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+	}
+}
+
+static void evergreen_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	evergreen_irq_ack(rdev);
+	evergreen_disable_interrupt_state(rdev);
+}
+
+void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+	evergreen_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not-overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
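
The overflow recovery is plain pointer arithmetic on a power-of-two ring: ptr_mask is ring_size - 1, so (wptr + 16) & ptr_mask resumes one 16-byte vector past the last not-overwritten entry, wrapping as needed. A stand-alone sketch of the wrap math, assuming the 64 KB IH ring configured in evergreen_init() below (the wptr value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned ring_size = 64 * 1024;		/* matches r600_ih_ring_init() below */
	unsigned ptr_mask = ring_size - 1;	/* valid because the size is a power of two */
	unsigned wptr = ring_size + 32;		/* hypothetical post-overflow write offset */

	/* resume one 16-byte vector past the last not-overwritten entry */
	unsigned rptr = (wptr + 16) & ptr_mask;
	printf("rptr resumes at byte offset %u\n", rptr);	/* 48 */
	return 0;
}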
+
+irqreturn_t evergreen_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	evergreen_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[2]))
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[3]))
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[4]))
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[5]))
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 44: /* hdmi */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI0\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI1\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI2\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI3\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI4\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI5\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 146:
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			if (rdev->family >= CHIP_CAYMAN) {
+				switch (src_data) {
+				case 0:
+					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+					break;
+				case 1:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+					break;
+				case 2:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+					break;
+				}
+			} else
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA trap event */
+			if (rdev->family >= CHIP_CAYMAN) {
+				DRM_DEBUG("IH: DMA1 trap\n");
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			}
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
+	if (queue_hdmi)
+		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
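
Each IH ring entry is a 16-byte vector, and the loop above decodes the interrupt source from its first two dwords: src_id in the low 8 bits of dword 0, src_data in the low 28 bits of dword 1, with rptr advancing 16 bytes per vector. The extraction, shown stand-alone with a hypothetical vector:

#include <stdio.h>

int main(void)
{
	/* hypothetical IH vector: src_id 1, src_data 0 would be a D1 vblank */
	unsigned ring[4] = { 0x00000001, 0x00000000, 0, 0 };
	unsigned rptr = 0;			/* byte offset into the ring */

	unsigned ring_index = rptr / 4;		/* wptr/rptr are in bytes, ring[] in dwords */
	unsigned src_id   = ring[ring_index] & 0xff;
	unsigned src_data = ring[ring_index + 1] & 0xfffffff;

	printf("src_id=%u src_data=%u\n", src_id, src_data);
	rptr += 16;				/* advance exactly one vector */
	printf("next rptr=%u\n", rptr);
	return 0;
}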
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, and a DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+}
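
The fence packet splits the 64-bit GPU address across two dwords: the low dword is forced to dword alignment, and only the low 8 bits of the upper half are carried (40 address bits in total). A stand-alone sketch of the split with an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0xAB12345678ULL;	/* hypothetical fence GPU address */

	unsigned lo = (unsigned)(addr & 0xfffffffc);	/* dword-aligned low 32 bits */
	unsigned hi = (unsigned)(addr >> 32) & 0xff;	/* 8 bits above 4 GB: 40-bit total */

	printf("lo=0x%08x hi=0x%02x\n", lo, hi);	/* lo=0x12345678 hi=0xab */
	return 0;
}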
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
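
The padding loop enforces an alignment invariant rather than padding to a round boundary: the INDIRECT_BUFFER packet is 3 dwords, so parking wptr on an (8n + 5) dword offset makes the packet end exactly on an 8-dword boundary. A quick stand-alone check of that arithmetic (the starting wptr is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned wptr = 11;	/* hypothetical current write pointer, in dwords */
	unsigned nops = 0;

	/* pad with NOPs until wptr % 8 == 5 */
	while ((wptr & 7) != 5) {
		wptr++;
		nops++;
	}
	/* the 3-dword INDIRECT_BUFFER packet then ends on an 8-dword boundary */
	printf("padded %u NOPs; packet ends at dword %u\n", nops, wptr + 3);	/* 2, 16 */
	return 0;
}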
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
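
The copy is chunked because each DMA_PACKET_COPY carries a 20-bit dword count, capping one packet at 0xFFFFF dwords (just under 4 MB); the ring reservation of num_loops * 5 + 11 covers the 5 dwords written per chunk plus fence/semaphore overhead. A stand-alone sketch of the chunking arithmetic, assuming the usual 4 KB GPU page size (RADEON_GPU_PAGE_SHIFT == 12):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_gpu_pages = 8192;				/* hypothetical 32 MB copy */
	unsigned size_in_dw = (num_gpu_pages << 12) / 4;	/* 4 KB pages -> dwords */
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xfffffu);
	unsigned ring_dw = num_loops * 5 + 11;			/* dwords to reserve on the ring */

	printf("%u dwords, %u copy packets, reserve %u ring dwords\n",
	    size_in_dw, num_loops, ring_dw);			/* 8388608, 9, 56 */
	return 0;
}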
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	evergreen_mc_program(rdev);
+	if (rdev->flags & RADEON_IS_AGP) {
+		evergreen_agp_enable(rdev);
+	} else {
+		r = evergreen_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	evergreen_gpu_init(rdev);
+
+	r = evergreen_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = evergreen_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = evergreen_cp_resume(rdev);
+	if (r)
+		return r;
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed!\n");
+	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
+	 * posting will perform the necessary tasks to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		DRM_ERROR("evergreen startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	evergreen_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does pretty much
+ * nothing more than call asic-specific functions. This
+ * should also allow us to remove a bunch of callback functions
+ * like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed!\n");
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		evergreen_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing on BTC parts.
+	 * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+	 */
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_blit_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	if (ASIC_IS_DCE5(rdev))
+		ni_fini_microcode(rdev);
+	else
+		r600_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, speed_cntl, mask;
+	int ret;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & DRM_PCIE_SPEED_50))
+		return;
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/evergreen.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_blit_kms.c
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_blit_kms.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_blit_kms.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,734 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_blit_kms.c 261455 2014-02-04 03:36:42Z eadler $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+#include "cayman_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+		  int w, int h, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cb_color_info;
+	int pitch, slice;
+
+	h = roundup2(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = CB_FORMAT(format) |
+		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, pitch);
+	radeon_ring_write(ring, slice);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, cb_color_info);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+}
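
The pitch and slice fields are written in hardware units: pitch in 8-pixel groups minus one, slice in 64-pixel tiles minus one, with the height first rounded up to a multiple of 8 for the 1D-tiled layout. A stand-alone check of the encoding for a hypothetical 1024x768 target:

#include <stdio.h>

#define ROUNDUP2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))	/* y must be a power of two */

int main(void)
{
	int w = 1024, h = 768;		/* hypothetical render target dimensions */

	h = ROUNDUP2(h, 8);		/* 1D-tiled surfaces want 8-row alignment */
	if (h < 8)
		h = 8;

	int pitch = (w / 8) - 1;	/* 8-pixel units, minus one */
	int slice = ((w * h) / 64) - 1;	/* 64-pixel tiles, minus one */

	printf("pitch=%d slice=%d\n", pitch, slice);	/* pitch=127 slice=12287 */
	return 0;
}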
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+		    u32 sync_type, u32 size,
+		    u64 mc_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cp_coher_size;
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+		 * to the RB directly. For IBs, the CP programs this as part of the
+		 * surface_sync packet.
+		 */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
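
CP_COHER_SIZE is expressed in 256-byte units, so the helper rounds the byte count up with ((size + 255) >> 8), while 0xffffffff passes straight through as the whole-address-space sentinel. The rounding, checked stand-alone with a few hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned sizes[] = { 1, 256, 257, 512 };	/* hypothetical byte counts */

	for (int i = 0; i < 4; i++) {
		unsigned coher = (sizes[i] + 255) >> 8;	/* 256-byte units, rounded up */
		printf("%u bytes -> %u unit(s)\n", sizes[i], coher);
	}
	return 0;	/* prints 1, 1, 2, 2 */
}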
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u64 gpu_addr;
+
+	/* VS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 2);
+	radeon_ring_write(ring, 0);
+
+	/* PS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 2);
+
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 10 + 1 sync (5) = 15 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+	/* high addr, stride */
+	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+		SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+	/* xyzw swizzles */
+	sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
+		SQ_VTCX_SEL_Y(SQ_SEL_Y) |
+		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
+		SQ_VTCX_SEL_W(SQ_SEL_W);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0x580);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1); /* size */
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, sq_vtx_constant_word3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2) ||
+	    (rdev->family == CHIP_CAICOS))
+		cp_set_surface_sync(rdev,
+				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(rdev,
+				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+		 int format, int w, int h, int pitch,
+		 u64 gpu_addr, u32 size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_tex_resource_word0, sq_tex_resource_word1;
+	u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
+	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+				  ((w - 1) << 18));
+	sq_tex_resource_word1 = ((h - 1) << 0) |
+				TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+	/* xyzw swizzles */
+	sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
+				TEX_DST_SEL_Y(SQ_SEL_Y) |
+				TEX_DST_SEL_Z(SQ_SEL_Z) |
+				TEX_DST_SEL_W(SQ_SEL_W);
+
+	sq_tex_resource_word7 = format |
+		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
+
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word7);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+	     int x2, int y2)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	/* workaround some hw bugs */
+	if (x2 == 0)
+		x1 = 1;
+	if (y2 == 0)
+		y1 = 1;
+	if (rdev->family >= CHIP_CAYMAN) {
+		if ((x2 == 1) && (y2 == 1))
+			x2 = 2;
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+}
+
+/* emits 39 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+	int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_hs_threads, num_ls_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	int num_hs_stack_entries, num_ls_stack_entries;
+	u64 gpu_addr;
+	int dwords;
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	if (rdev->family < CHIP_CAYMAN) {
+		switch (rdev->family) {
+		case CHIP_CEDAR:
+		default:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 16;
+			num_gs_threads = 16;
+			num_es_threads = 16;
+			num_hs_threads = 16;
+			num_ls_threads = 16;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_REDWOOD:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_JUNIPER:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_CYPRESS:
+		case CHIP_HEMLOCK:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_PALM:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 16;
+			num_gs_threads = 16;
+			num_es_threads = 16;
+			num_hs_threads = 16;
+			num_ls_threads = 16;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_SUMO:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 25;
+			num_gs_threads = 25;
+			num_es_threads = 25;
+			num_hs_threads = 25;
+			num_ls_threads = 25;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_SUMO2:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 25;
+			num_gs_threads = 25;
+			num_es_threads = 25;
+			num_hs_threads = 25;
+			num_ls_threads = 25;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_BARTS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_TURKS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_CAICOS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 10;
+			num_gs_threads = 10;
+			num_es_threads = 10;
+			num_hs_threads = 10;
+			num_ls_threads = 10;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		}
+
+		if ((rdev->family == CHIP_CEDAR) ||
+		    (rdev->family == CHIP_PALM) ||
+		    (rdev->family == CHIP_SUMO) ||
+		    (rdev->family == CHIP_SUMO2) ||
+		    (rdev->family == CHIP_CAICOS))
+			sq_config = 0;
+		else
+			sq_config = VC_ENABLE;
+
+		sq_config |= (EXPORT_SRC_C |
+			      CS_PRIO(0) |
+			      LS_PRIO(0) |
+			      HS_PRIO(0) |
+			      PS_PRIO(0) |
+			      VS_PRIO(1) |
+			      GS_PRIO(2) |
+			      ES_PRIO(3));
+
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+					  NUM_VS_GPRS(num_vs_gprs) |
+					  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+					  NUM_ES_GPRS(num_es_gprs));
+		sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+					  NUM_LS_GPRS(num_ls_gprs));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+					   NUM_VS_THREADS(num_vs_threads) |
+					   NUM_GS_THREADS(num_gs_threads) |
+					   NUM_ES_THREADS(num_es_threads));
+		sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+					     NUM_LS_THREADS(num_ls_threads));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+					    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+					    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+		sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+		/* disable dyn gprs */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0);
+
+		/* setup LDS */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0x10001000);
+
+		/* SQ config */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, sq_config);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, sq_thread_resource_mgmt);
+		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
+	}
+
+	/* CONTEXT_CONTROL */
+	radeon_ring_write(ring, 0xc0012800);
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* SET_SAMPLER */
+	radeon_ring_write(ring, 0xc0036e00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000012);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	/* emit an IB pointing at default state */
+	dwords = roundup2(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
+
+}
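The CONTEXT_CONTROL, SQ_VTX_BASE_VTX_LOC and SET_SAMPLER sections above write raw header words instead of using the PACKET3() macro. A minimal standalone sketch of how such a word decodes, assuming the usual r600-family CP header layout (packet type in bits 31:30, data dword count minus one in bits 29:16, opcode in bits 15:8):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t hdr = 0xc0012800;	/* the CONTEXT_CONTROL header above */

    	printf("type %u, count %u, opcode 0x%02x\n",
    	    hdr >> 30,			/* 3: a PACKET3 */
    	    (hdr >> 16) & 0x3fff,	/* 1: two data dwords follow */
    	    (hdr >> 8) & 0xff);		/* 0x28: CONTEXT_CONTROL in r600d.h */
    	return 0;
    }

The same decode applied to 0xc0026f00 and 0xc0036e00 gives opcodes 0x6f and 0x6e, which match the SQ_VTX_BASE_VTX_LOC and SET_SAMPLER comments if the r600 SET_CTL_CONST/SET_SAMPLER opcode values carry over to evergreen.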
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+	u32 obj_size;
+	int i, r, dwords;
+	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
+
+	rdev->r600_blit.primitives.set_render_target = set_render_target;
+	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+	rdev->r600_blit.primitives.set_shaders = set_shaders;
+	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+	rdev->r600_blit.primitives.set_scissors = set_scissors;
+	rdev->r600_blit.primitives.draw_auto = draw_auto;
+	rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+	rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
+	rdev->r600_blit.ring_size_common += 5; /* done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+	rdev->r600_blit.ring_size_per_loop = 74;
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
+
+	rdev->r600_blit.max_dim = 16384;
+
+	rdev->r600_blit.state_offset = 0;
+
+	if (rdev->family < CHIP_CAYMAN)
+		rdev->r600_blit.state_len = evergreen_default_size;
+	else
+		rdev->r600_blit.state_len = cayman_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
+
+	obj_size = dwords * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	rdev->r600_blit.vs_offset = obj_size;
+	if (rdev->family < CHIP_CAYMAN)
+		obj_size += evergreen_vs_size * 4;
+	else
+		obj_size += cayman_vs_size * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	rdev->r600_blit.ps_offset = obj_size;
+	if (rdev->family < CHIP_CAYMAN)
+		obj_size += evergreen_ps_size * 4;
+	else
+		obj_size += cayman_ps_size * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	/* pin copy shader into vram if not already initialized */
+	if (!rdev->r600_blit.shader_obj) {
+		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->r600_blit.shader_obj);
+		if (r) {
+			DRM_ERROR("evergreen failed to allocate shader\n");
+			return r;
+		}
+
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+			return r;
+		}
+	}
+
+	DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+		  obj_size,
+		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+	if (r) {
+		DRM_ERROR("failed to map blit object %d\n", r);
+		return r;
+	}
+
+	if (rdev->family < CHIP_CAYMAN) {
+		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
+			    evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+		if (num_packet2s)
+			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+				    packet2s, num_packet2s * 4);
+		for (i = 0; i < evergreen_vs_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+		for (i = 0; i < evergreen_ps_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+	} else {
+		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
+			    cayman_default_state, rdev->r600_blit.state_len * 4);
+
+		if (num_packet2s)
+			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+				    packet2s, num_packet2s * 4);
+		for (i = 0; i < cayman_vs_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
+		for (i = 0; i < cayman_ps_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
+	}
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	return 0;
+}
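To follow the offset arithmetic in evergreen_blit_init(): the shader bo ends up holding three 256-byte-aligned regions — the default state (padded out to a 16-dword boundary with PACKET2 nops), then the vertex shader, then the pixel shader. A self-contained sketch of the same layout computation, with hypothetical dword counts standing in for the real table sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* power-of-two round-up, like the kernel's roundup2() */
    #define ROUNDUP2(x, a)	(((x) + ((a) - 1)) & ~((uint32_t)((a) - 1)))

    int main(void)
    {
    	uint32_t state_len = 345, vs_size = 18, ps_size = 20; /* hypothetical */
    	uint32_t dwords = ROUNDUP2(state_len, 16);	/* PACKET2 padding */
    	uint32_t obj_size = ROUNDUP2(dwords * 4, 256);	/* state region */
    	uint32_t vs_offset = obj_size;

    	obj_size = ROUNDUP2(obj_size + vs_size * 4, 256);
    	uint32_t ps_offset = obj_size;

    	obj_size = ROUNDUP2(obj_size + ps_size * 4, 256);
    	printf("vs at 0x%x, ps at 0x%x, total 0x%x\n",
    	    vs_offset, ps_offset, obj_size);
    	return 0;
    }

With these numbers it prints "vs at 0x600, ps at 0x700, total 0x800", mirroring the vs_offset/ps_offset/obj_size progression above.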


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_blit_kms.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,359 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_blit_shaders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 evergreen_default_state[] =
+{
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x00000010,
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0106900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc00d6900,
+	0x00000202,
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x0000022a,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0016900,
+	0x00000187,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc00b6900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 evergreen_vs[] =
+{
+	0x00000004,
+	0x80800400,
+	0x0000a03c,
+	0x95000688,
+	0x00004000,
+	0x15200688,
+	0x00000000,
+	0x00000000,
+	0x3c000000,
+	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
+	0x00080000,
+#endif
+	0x00000000,
+	0x1c000000,
+	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
+	0x00000008,
+#endif
+	0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+	0x00000003,
+	0xa00c0000,
+	0x00000008,
+	0x80400000,
+	0x00000000,
+	0x95200688,
+	0x00380400,
+	0x00146b10,
+	0x00380000,
+	0x20146b10,
+	0x00380400,
+	0x40146b00,
+	0x80380000,
+	0x60146b00,
+	0x00000000,
+	0x00000000,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.h
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,39 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_blit_shaders.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_blit_shaders.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_cs.c
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_cs.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_cs.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3732 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_cs.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
+#include "r600_cs.h"
+
+#define MAX(a,b)                   (((a)>(b))?(a):(b))
+#define MIN(a,b)                   (((a)<(b))?(a):(b))
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc);
+#endif
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+					  struct radeon_cs_reloc **cs_reloc);
+
+struct evergreen_cs_track {
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	u32			row_size;
+	/* value we track */
+	u32			nsamples;		/* unused */
+	struct radeon_bo	*cb_color_bo[12];
+	u32			cb_color_bo_offset[12];
+	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
+	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
+	u32			cb_color_info[12];
+	u32			cb_color_view[12];
+	u32			cb_color_pitch[12];
+	u32			cb_color_slice[12];
+	u32			cb_color_slice_idx[12];
+	u32			cb_color_attrib[12];
+	u32			cb_color_cmask_slice[8];/* unused */
+	u32			cb_color_fmask_slice[8];/* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask; /* unused */
+	u32			vgt_strmout_config;
+	u32			vgt_strmout_buffer_config;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_view;
+	u32			db_depth_slice;
+	u32			db_depth_size;
+	u32			db_z_info;
+	u32			db_z_read_offset;
+	u32			db_z_write_offset;
+	struct radeon_bo	*db_z_read_bo;
+	struct radeon_bo	*db_z_write_bo;
+	u32			db_s_info;
+	u32			db_s_read_offset;
+	u32			db_s_write_offset;
+	struct radeon_bo	*db_s_read_bo;
+	struct radeon_bo	*db_s_write_bo;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	u32			htile_offset;
+	u32			htile_surface;
+	struct radeon_bo	*htile_bo;
+};
+
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+	if (tiling_flags & RADEON_TILING_MACRO)
+		return ARRAY_2D_TILED_THIN1;
+	else if (tiling_flags & RADEON_TILING_MICRO)
+		return ARRAY_1D_TILED_THIN1;
+	else
+		return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+	switch (nbanks) {
+	case 2:
+		return ADDR_SURF_2_BANK;
+	case 4:
+		return ADDR_SURF_4_BANK;
+	case 8:
+	default:
+		return ADDR_SURF_8_BANK;
+	case 16:
+		return ADDR_SURF_16_BANK;
+	}
+}
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		track->cb_color_fmask_bo[i] = NULL;
+		track->cb_color_cmask_bo[i] = NULL;
+		track->cb_color_cmask_slice[i] = 0;
+		track->cb_color_fmask_slice[i] = 0;
+	}
+
+	for (i = 0; i < 12; i++) {
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_pitch[i] = 0;
+		track->cb_color_slice[i] = 0xfffffff;
+		track->cb_color_slice_idx[i] = 0;
+	}
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+
+	track->db_depth_slice = 0xffffffff;
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_z_info = 0xFFFFFFFF;
+	track->db_z_read_offset = 0xFFFFFFFF;
+	track->db_z_write_offset = 0xFFFFFFFF;
+	track->db_z_read_bo = NULL;
+	track->db_z_write_bo = NULL;
+	track->db_s_info = 0xFFFFFFFF;
+	track->db_s_read_offset = 0xFFFFFFFF;
+	track->db_s_write_offset = 0xFFFFFFFF;
+	track->db_s_read_bo = NULL;
+	track->db_s_write_bo = NULL;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
+
+struct eg_surface {
+	/* value gathered from cs */
+	unsigned	nbx;
+	unsigned	nby;
+	unsigned	format;
+	unsigned	mode;
+	unsigned	nbanks;
+	unsigned	bankw;
+	unsigned	bankh;
+	unsigned	tsplit;
+	unsigned	mtilea;
+	unsigned	nsamples;
+	/* output value */
+	unsigned	bpe;
+	unsigned	layer_size;
+	unsigned	palign;
+	unsigned	halign;
+	unsigned long	base_align;
+};
+
+static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
+					  struct eg_surface *surf,
+					  const char *prefix)
+{
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = surf->bpe;
+	surf->palign = 1;
+	surf->halign = 1;
+	return 0;
+}
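As a worked example of the linear case (numbers chosen for illustration, not taken from the patch): a 1920x1080 surface with a 4-byte format and one sample yields layer_size = 1920 * 1080 * 4 * 1 = 8294400 bytes, base_align = bpe = 4, and no pitch or height alignment constraint (palign = halign = 1).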
+
+static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
+						  struct eg_surface *surf,
+						  const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = MAX(64, track->group_size / surf->bpe);
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 1;
+	if (surf->nbx & (palign - 1)) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+	palign = MAX(8, palign);
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 8;
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign,
+				 track->group_size, surf->bpe, surf->nsamples);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (8 - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
+				 __func__, __LINE__, prefix, surf->nby);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign, halign, tileb, slice_pt;
+	unsigned mtile_pr, mtile_ps, mtileb;
+
+	tileb = 64 * surf->bpe * surf->nsamples;
+	slice_pt = 1;
+	if (tileb > surf->tsplit) {
+		slice_pt = tileb / surf->tsplit;
+	}
+	tileb = tileb / slice_pt;
+	/* macro tile width & height */
+	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
+	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
+	mtileb = (palign / 8) * (halign / 8) * tileb;
+	mtile_pr = surf->nbx / palign;
+	mtile_ps = (mtile_pr * surf->nby) / halign;
+	surf->layer_size = mtile_ps * mtileb * slice_pt;
+	surf->base_align = (palign / 8) * (halign / 8) * tileb;
+	surf->palign = palign;
+	surf->halign = halign;
+
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (halign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nby, halign);
+		}
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int evergreen_surface_check(struct radeon_cs_parser *p,
+				   struct eg_surface *surf,
+				   const char *prefix)
+{
+	/* some common value computed here */
+	surf->bpe = r600_fmt_get_blocksize(surf->format);
+
+	switch (surf->mode) {
+	case ARRAY_LINEAR_GENERAL:
+		return evergreen_surface_check_linear(p, surf, prefix);
+	case ARRAY_LINEAR_ALIGNED:
+		return evergreen_surface_check_linear_aligned(p, surf, prefix);
+	case ARRAY_1D_TILED_THIN1:
+		return evergreen_surface_check_1d(p, surf, prefix);
+	case ARRAY_2D_TILED_THIN1:
+		return evergreen_surface_check_2d(p, surf, prefix);
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+	return -EINVAL;
+}
+
+static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
+					      struct eg_surface *surf,
+					      const char *prefix)
+{
+	switch (surf->mode) {
+	case ARRAY_2D_TILED_THIN1:
+		break;
+	case ARRAY_LINEAR_GENERAL:
+	case ARRAY_LINEAR_ALIGNED:
+	case ARRAY_1D_TILED_THIN1:
+		return 0;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+
+	switch (surf->nbanks) {
+	case 0: surf->nbanks = 2; break;
+	case 1: surf->nbanks = 4; break;
+	case 2: surf->nbanks = 8; break;
+	case 3: surf->nbanks = 16; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
+			 __func__, __LINE__, prefix, surf->nbanks);
+		return -EINVAL;
+	}
+	switch (surf->bankw) {
+	case 0: surf->bankw = 1; break;
+	case 1: surf->bankw = 2; break;
+	case 2: surf->bankw = 4; break;
+	case 3: surf->bankw = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
+			 __func__, __LINE__, prefix, surf->bankw);
+		return -EINVAL;
+	}
+	switch (surf->bankh) {
+	case 0: surf->bankh = 1; break;
+	case 1: surf->bankh = 2; break;
+	case 2: surf->bankh = 4; break;
+	case 3: surf->bankh = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
+			 __func__, __LINE__, prefix, surf->bankh);
+		return -EINVAL;
+	}
+	switch (surf->mtilea) {
+	case 0: surf->mtilea = 1; break;
+	case 1: surf->mtilea = 2; break;
+	case 2: surf->mtilea = 4; break;
+	case 3: surf->mtilea = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
+			 __func__, __LINE__, prefix, surf->mtilea);
+		return -EINVAL;
+	}
+	switch (surf->tsplit) {
+	case 0: surf->tsplit = 64; break;
+	case 1: surf->tsplit = 128; break;
+	case 2: surf->tsplit = 256; break;
+	case 3: surf->tsplit = 512; break;
+	case 4: surf->tsplit = 1024; break;
+	case 5: surf->tsplit = 2048; break;
+	case 6: surf->tsplit = 4096; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
+			 __func__, __LINE__, prefix, surf->tsplit);
+		return -EINVAL;
+	}
+	return 0;
+}
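Each switch above decodes a small register field into a power of two, so the whole conversion collapses to shifts. A sketch of the equivalent decode (the helper name is hypothetical; it assumes the fields were already range-checked as above):

    static void eg_surface_decode_tiling(struct eg_surface *surf)
    {
    	surf->nbanks = 2 << surf->nbanks;	/* 0..3 -> 2, 4, 8, 16 */
    	surf->bankw = 1 << surf->bankw;		/* 0..3 -> 1, 2, 4, 8 */
    	surf->bankh = 1 << surf->bankh;
    	surf->mtilea = 1 << surf->mtilea;
    	surf->tsplit = 64 << surf->tsplit;	/* 0..6 -> 64 .. 4096 */
    }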
+
+static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+	pitch = track->cb_color_pitch[id];
+	slice = track->cb_color_slice[id];
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
+	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
+	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
+	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
+	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
+	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
+	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
+	surf.nsamples = 1;
+
+	if (!r600_fmt_is_valid_color(surf.format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
+			 __func__, __LINE__, surf.format,
+			id, track->cb_color_info[id]);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "cb");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "cb");
+	if (r) {
+		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, id, track->cb_color_pitch[id],
+			 track->cb_color_slice[id], track->cb_color_attrib[id],
+			 track->cb_color_info[id]);
+		return r;
+	}
+
+	offset = track->cb_color_bo_offset[id] << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, id, offset, surf.base_align);
+		return -EINVAL;
+	}
+
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+		/* old ddx are broken: they allocate the bo with w*h*bpp but
+		 * program the slice with ALIGN(h, 8); catch this and patch
+		 * the command stream.
+		 */
+		if (!surf.mode) {
+			volatile u32 *ib = p->ib.ptr;
+			unsigned long tmp, nby, bsize, size, min = 0;
+
+			/* find the height the ddx wants */
+			if (surf.nby > 8) {
+				min = surf.nby - 8;
+			}
+			bsize = radeon_bo_size(track->cb_color_bo[id]);
+			tmp = track->cb_color_bo_offset[id] << 8;
+			for (nby = surf.nby; nby > min; nby--) {
+				size = nby * surf.nbx * surf.bpe * surf.nsamples;
+				if ((tmp + size * mslice) <= bsize) {
+					break;
+				}
+			}
+			if (nby > min) {
+				surf.nby = nby;
+				slice = ((nby * surf.nbx) / 64) - 1;
+				if (!evergreen_surface_check(p, &surf, "cb")) {
+					/* check if this one works */
+					tmp += surf.layer_size * mslice;
+					if (tmp <= bsize) {
+						ib[track->cb_color_slice_idx[id]] = slice;
+						goto old_ddx_ok;
+					}
+				}
+			}
+		}
+		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
+			 __func__, __LINE__, id, surf.layer_size,
+			track->cb_color_bo_offset[id] << 8, mslice,
+			radeon_bo_size(track->cb_color_bo[id]), slice);
+		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+			 __func__, __LINE__, surf.nbx, surf.nby,
+			surf.mode, surf.bpe, surf.nsamples,
+			surf.bankw, surf.bankh,
+			surf.tsplit, surf.mtilea);
+		return -EINVAL;
+	}
+old_ddx_ok:
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+						unsigned nbx, unsigned nby)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned long size;
+
+	if (track->htile_bo == NULL) {
+		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				__func__, __LINE__, track->db_z_info);
+		return -EINVAL;
+	}
+
+	if (G_028ABC_LINEAR(track->htile_surface)) {
+		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+		nbx = roundup(nbx, 16 * 8);
+		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
+		nby = roundup(nby, track->npipes * 8);
+	} else {
+		/* always assume 8x8 htile */
+		/* alignment is htile align * 8; htile alignment varies with
+		 * the number of pipes, the tile width and nby
+		 */
+		switch (track->npipes) {
+		case 8:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = roundup(nbx, 64 * 8);
+			nby = roundup(nby, 64 * 8);
+			break;
+		case 4:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = roundup(nbx, 64 * 8);
+			nby = roundup(nby, 32 * 8);
+			break;
+		case 2:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = roundup(nbx, 32 * 8);
+			nby = roundup(nby, 32 * 8);
+			break;
+		case 1:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = roundup(nbx, 32 * 8);
+			nby = roundup(nby, 16 * 8);
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					__func__, __LINE__, track->npipes);
+			return -EINVAL;
+		}
+	}
+	/* compute number of htile */
+	nbx = nbx >> 3;
+	nby = nby >> 3;
+	/* size must be aligned on npipes * 2K boundary */
+	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+	size += track->htile_offset;
+
+	if (size > radeon_bo_size(track->htile_bo)) {
+		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				__func__, __LINE__, radeon_bo_size(track->htile_bo),
+				size, nbx, nby);
+		return -EINVAL;
+	}
+	return 0;
+}
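A worked example for the tiled branch (illustrative numbers): with npipes = 4, a 1024x768 depth surface rounds nbx up to a multiple of 64 * 8 = 512 (1024 already is) and nby up to a multiple of 32 * 8 = 256 (768 already is); that gives (1024 >> 3) * (768 >> 3) = 12288 htiles, so the buffer must hold roundup(12288 * 4, 4 * 2048) = 49152 bytes past htile_offset.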
+
+static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028044_FORMAT(track->db_s_info);
+	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	if (surf.format != 1) {
+		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	/* replace by color format so we can use same code */
+	surf.format = V_028C70_COLOR_8;
+
+	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, NULL);
+	if (r) {
+		/* old userspace doesn't compute proper depth/stencil alignment;
+		 * check the alignment against a larger bytes-per-element value
+		 * and only report an error if that alignment is wrong too.
+		 */
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		r = evergreen_surface_check(p, &surf, "stencil");
+		if (r) {
+			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+				 __func__, __LINE__, track->db_depth_size,
+				 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		}
+		return r;
+	}
+
+	offset = track->db_s_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_read_bo)) {
+		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_read_offset << 8, mslice,
+			radeon_bo_size(track->db_s_read_bo));
+		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		return -EINVAL;
+	}
+
+	offset = track->db_s_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_write_bo)) {
+		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_write_offset << 8, mslice,
+			radeon_bo_size(track->db_s_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028040_FORMAT(track->db_z_info);
+	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	switch (surf.format) {
+	case V_028040_Z_16:
+		surf.format = V_028C70_COLOR_16;
+		break;
+	case V_028040_Z_24:
+	case V_028040_Z_32_FLOAT:
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	offset = track->db_z_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_read_bo)) {
+		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_read_offset << 8, mslice,
+			radeon_bo_size(track->db_z_read_bo));
+		return -EINVAL;
+	}
+
+	offset = track->db_z_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_write_bo)) {
+		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_write_offset << 8, mslice,
+			radeon_bo_size(track->db_z_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
+					       struct radeon_bo *texture,
+					       struct radeon_bo *mipmap,
+					       unsigned idx)
+{
+	struct eg_surface surf;
+	unsigned long toffset, moffset;
+	unsigned dim, llevel, mslice, width, height, depth, i;
+	u32 texdw[8];
+	int r;
+
+	texdw[0] = radeon_get_ib_value(p, idx + 0);
+	texdw[1] = radeon_get_ib_value(p, idx + 1);
+	texdw[2] = radeon_get_ib_value(p, idx + 2);
+	texdw[3] = radeon_get_ib_value(p, idx + 3);
+	texdw[4] = radeon_get_ib_value(p, idx + 4);
+	texdw[5] = radeon_get_ib_value(p, idx + 5);
+	texdw[6] = radeon_get_ib_value(p, idx + 6);
+	texdw[7] = radeon_get_ib_value(p, idx + 7);
+	dim = G_030000_DIM(texdw[0]);
+	llevel = G_030014_LAST_LEVEL(texdw[5]);
+	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
+	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
+	height =  G_030004_TEX_HEIGHT(texdw[1]) + 1;
+	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
+	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
+	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
+	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
+	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
+	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
+	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
+	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
+	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
+	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
+	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
+	surf.nsamples = 1;
+	toffset = texdw[2] << 8;
+	moffset = texdw[3] << 8;
+
+	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	switch (dim) {
+	case V_030000_SQ_TEX_DIM_1D:
+	case V_030000_SQ_TEX_DIM_2D:
+	case V_030000_SQ_TEX_DIM_CUBEMAP:
+	case V_030000_SQ_TEX_DIM_1D_ARRAY:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY:
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_2D_MSAA:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		surf.nsamples = 1 << llevel;
+		llevel = 0;
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_3D:
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
+			 __func__, __LINE__, dim);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "texture");
+	if (r) {
+		return r;
+	}
+
+	/* align height */
+	evergreen_surface_check(p, &surf, NULL);
+	surf.nby = roundup(surf.nby, surf.halign);
+
+	r = evergreen_surface_check(p, &surf, "texture");
+	if (r) {
+		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
+			 texdw[5], texdw[6], texdw[7]);
+		return r;
+	}
+
+	/* check texture size */
+	if (toffset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, toffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (moffset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, moffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (dim == SQ_TEX_DIM_3D) {
+		toffset += surf.layer_size * depth;
+	} else {
+		toffset += surf.layer_size * mslice;
+	}
+	if (toffset > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
+			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)texdw[2] << 8, mslice,
+			depth, radeon_bo_size(texture),
+			surf.nbx, surf.nby);
+		return -EINVAL;
+	}
+
+	if (!mipmap) {
+		if (llevel) {
+			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+				 __func__, __LINE__);
+			return -EINVAL;
+		} else {
+			return 0; /* everything's ok */
+		}
+	}
+
+	/* check mipmap size */
+	for (i = 1; i <= llevel; i++) {
+		unsigned w, h, d;
+
+		w = r600_mip_minify(width, i);
+		h = r600_mip_minify(height, i);
+		d = r600_mip_minify(depth, i);
+		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
+		surf.nby = r600_fmt_get_nblocksy(surf.format, h);
+
+		switch (surf.mode) {
+		case ARRAY_2D_TILED_THIN1:
+			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
+				surf.mode = ARRAY_1D_TILED_THIN1;
+			}
+			/* recompute alignment */
+			evergreen_surface_check(p, &surf, NULL);
+			break;
+		case ARRAY_LINEAR_GENERAL:
+		case ARRAY_LINEAR_ALIGNED:
+		case ARRAY_1D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
+				 __func__, __LINE__, surf.mode);
+			return -EINVAL;
+		}
+		surf.nbx = roundup(surf.nbx, surf.palign);
+		surf.nby = roundup(surf.nby, surf.halign);
+
+		r = evergreen_surface_check(p, &surf, "mipmap");
+		if (r) {
+			return r;
+		}
+
+		if (dim == SQ_TEX_DIM_3D) {
+			moffset += surf.layer_size * d;
+		} else {
+			moffset += surf.layer_size * mslice;
+		}
+		if (moffset > radeon_bo_size(mipmap)) {
+			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
+					"offset %ld, coffset %ld, max layer %d, depth %d, "
+					"bo size %ld) level0 (%d %d %d)\n",
+					__func__, __LINE__, i, surf.layer_size,
+					(unsigned long)texdw[3] << 8, moffset, mslice,
+					d, radeon_bo_size(mipmap),
+					width, height, depth);
+			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+				 __func__, __LINE__, surf.nbx, surf.nby,
+				surf.mode, surf.bpe, surf.nsamples,
+				surf.bankw, surf.bankh,
+				surf.tsplit, surf.mtilea);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned tmp, i;
+	int r;
+	unsigned buffer_mask = 0;
+
+	/* check streamout */
+	if (track->streamout_dirty && track->vgt_strmout_config) {
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_config & (1 << i)) {
+				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
+			}
+		}
+
+		for (i = 0; i < 4; i++) {
+			if (buffer_mask & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+							(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
+							  i, (uintmax_t)offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+		for (i = 0; i < 8; i++) {
+			if ((tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* check cb */
+				r = evergreen_cs_track_validate_cb(p, i);
+				if (r) {
+					return r;
+				}
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	if (track->db_dirty) {
+		/* Check stencil buffer */
+		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_stencil(p);
+			if (r)
+				return r;
+		}
+		/* Check depth buffer */
+		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+		    G_028800_Z_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_depth(p);
+			if (r)
+				return r;
+		}
+		track->db_dirty = false;
+	}
+
+	return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser:	parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ *
+ * Assume that chunk_ib_index is properly set. Returns -EINVAL
+ * if the packet is bigger than the remaining ib size or if the
+ * packet type is unknown.
+ **/
+static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt,
+			      unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	uint32_t header;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	header = radeon_get_ib_value(p, idx);
+	pkt->idx = idx;
+	pkt->type = CP_PACKET_GET_TYPE(header);
+	pkt->count = CP_PACKET_GET_COUNT(header);
+	pkt->one_reg_wr = 0;
+	switch (pkt->type) {
+	case PACKET_TYPE0:
+		pkt->reg = CP_PACKET0_GET_REG(header);
+		break;
+	case PACKET_TYPE3:
+		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+		break;
+	case PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		return -EINVAL;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser:		parser structure holding parsing context.
+ * @data:		pointer to relocation data
+ * @offset_start:	starting offset
+ * @offset_mask:	offset mask (to align start offset on)
+ * @reloc:		reloc information
+ *
+ * Check that the next packet is a relocation packet3, do bo validation and
+ * compute the GPU offset using the provided start.
+ **/
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+					  struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return r;
+	}
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
+	*cs_reloc = p->relocs_ptr[(idx / 4)];
+	return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is a relocation packet3.
+ **/
+static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return false;
+	}
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		return false;
+	}
+	return true;
+}
+
+/**
+ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:		parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a WAIT_REG_MEM */
+	if (wait_reg_mem.type != PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		return -EINVAL;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+		return -EINVAL;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		return -EINVAL;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = CP_PACKET0_GET_REG(header);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else {
+		switch (reg) {
+		case EVERGREEN_VLINE_START_END:
+			header &= ~R600_CP_PACKET0_REG_MASK;
+			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
+			ib[h_idx] = header;
+			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
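+
+/*
+ * Dword layout assumed by the vline parser above (illustrative), with
+ * h_idx pointing at the PACKET0 header of the sequence userspace sends:
+ *
+ *   h_idx + 0      PACKET0 VLINE_START_END header
+ *   h_idx + 1      start/end value
+ *   h_idx + 2      PACKET3 WAIT_REG_MEM header (count = 5)
+ *   h_idx + 3..8   WAIT_REG_MEM payload; the poll register sits at
+ *                  h_idx + 4 and the mask at h_idx + 7, as checked above
+ *   h_idx + 9      PACKET3 NOP header (the relocation)
+ *   h_idx + 10     crtc_id, read above as h_idx + 2 + 7 + 1
+ *
+ * Nopping out h_idx + 2 .. h_idx + 8 therefore removes exactly the
+ * WAIT_REG_MEM packet when the crtc is disabled.
+ */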
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt,
+				   unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case EVERGREEN_VLINE_START_END:
+		r = evergreen_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = evergreen_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function tests the register against evergreen_reg_safe_bm and
+ * returns 0 if the register is safe. If the register is not flagged as
+ * safe, it is checked against the list of registers needing special handling.
+ */
+static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+	struct radeon_cs_reloc *reloc;
+	u32 last_reg;
+	u32 m, i, tmp, *ib;
+	int r;
+
+	if (p->rdev->family >= CHIP_CAYMAN)
+		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+	else
+		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
+	i = (reg >> 7);
+	if (i >= last_reg) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (p->rdev->family >= CHIP_CAYMAN) {
+		if (!(cayman_reg_safe_bm[i] & m))
+			return 0;
+	} else {
+		if (!(evergreen_reg_safe_bm[i] & m))
+			return 0;
+	}
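+	/*
+	 * Worked example of the bitmap test above (illustrative): each u32
+	 * of the safe bitmap covers 32 registers of 4 bytes, i.e. 128 bytes
+	 * of register space, hence i = reg >> 7.  For reg = 0x28800:
+	 *   i = 0x28800 >> 7 = 0x510
+	 *   m = 1 << ((0x28800 >> 2) & 31) = 1 << 0
+	 * A clear bit means the register is always safe; a set bit routes it
+	 * through the switch below.
+	 */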
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we need to understand it better before we can perform a
+	 * real security check on it (Jerome)
+	 */
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+		/* get value to populate the IB, don't remove */
+		/* tmp = radeon_get_ib_value(p, idx);
+		   ib[idx] = 0; */
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case CAYMAN_DB_EQAA:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_DB_DEPTH_INFO:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case DB_Z_INFO:
+		track->db_z_info = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] &= ~Z_ARRAY_MODE(0xf);
+			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= DB_TILE_SPLIT(tile_split) |
+						DB_BANK_WIDTH(bankw) |
+						DB_BANK_HEIGHT(bankh) |
+						DB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_INFO:
+		track->db_s_info = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_02805C_DB_DEPTH_SLICE:
+		track->db_depth_slice = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_Z_READ_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_z_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_Z_WRITE_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_z_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_READ_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_s_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_WRITE_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_s_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case VGT_STRMOUT_CONFIG:
+		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_CONFIG:
+		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case PA_SC_AA_CONFIG:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CAYMAN_PA_SC_AA_CONFIG:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CB_COLOR0_VIEW:
+	case CB_COLOR1_VIEW:
+	case CB_COLOR2_VIEW:
+	case CB_COLOR3_VIEW:
+	case CB_COLOR4_VIEW:
+	case CB_COLOR5_VIEW:
+	case CB_COLOR6_VIEW:
+	case CB_COLOR7_VIEW:
+		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_VIEW:
+	case CB_COLOR9_VIEW:
+	case CB_COLOR10_VIEW:
+	case CB_COLOR11_VIEW:
+		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
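+	/*
+	 * Illustrative: the CB_COLOR0..7 register groups are 0x3c bytes
+	 * apart while the CB_COLOR8..11 groups are only 0x1c bytes apart
+	 * (a smaller per-target register set), hence the two divisors used
+	 * in these cases.  E.g. reg = CB_COLOR9_VIEW gives
+	 * tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8 = 9.
+	 */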
+	case CB_COLOR0_INFO:
+	case CB_COLOR1_INFO:
+	case CB_COLOR2_INFO:
+	case CB_COLOR3_INFO:
+	case CB_COLOR4_INFO:
+	case CB_COLOR5_INFO:
+	case CB_COLOR6_INFO:
+	case CB_COLOR7_INFO:
+		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_INFO:
+	case CB_COLOR9_INFO:
+	case CB_COLOR10_INFO:
+	case CB_COLOR11_INFO:
+		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_PITCH:
+	case CB_COLOR1_PITCH:
+	case CB_COLOR2_PITCH:
+	case CB_COLOR3_PITCH:
+	case CB_COLOR4_PITCH:
+	case CB_COLOR5_PITCH:
+	case CB_COLOR6_PITCH:
+	case CB_COLOR7_PITCH:
+		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_PITCH:
+	case CB_COLOR9_PITCH:
+	case CB_COLOR10_PITCH:
+	case CB_COLOR11_PITCH:
+		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_SLICE:
+	case CB_COLOR1_SLICE:
+	case CB_COLOR2_SLICE:
+	case CB_COLOR3_SLICE:
+	case CB_COLOR4_SLICE:
+	case CB_COLOR5_SLICE:
+	case CB_COLOR6_SLICE:
+	case CB_COLOR7_SLICE:
+		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_SLICE:
+	case CB_COLOR9_SLICE:
+	case CB_COLOR10_SLICE:
+	case CB_COLOR11_SLICE:
+		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_ATTRIB:
+	case CB_COLOR1_ATTRIB:
+	case CB_COLOR2_ATTRIB:
+	case CB_COLOR3_ATTRIB:
+	case CB_COLOR4_ATTRIB:
+	case CB_COLOR5_ATTRIB:
+	case CB_COLOR6_ATTRIB:
+	case CB_COLOR7_ATTRIB:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_ATTRIB:
+	case CB_COLOR9_ATTRIB:
+	case CB_COLOR10_ATTRIB:
+	case CB_COLOR11_ATTRIB:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_FMASK:
+	case CB_COLOR1_FMASK:
+	case CB_COLOR2_FMASK:
+	case CB_COLOR3_FMASK:
+	case CB_COLOR4_FMASK:
+	case CB_COLOR5_FMASK:
+	case CB_COLOR6_FMASK:
+	case CB_COLOR7_FMASK:
+		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_fmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_CMASK:
+	case CB_COLOR1_CMASK:
+	case CB_COLOR2_CMASK:
+	case CB_COLOR3_CMASK:
+	case CB_COLOR4_CMASK:
+	case CB_COLOR5_CMASK:
+	case CB_COLOR6_CMASK:
+	case CB_COLOR7_CMASK:
+		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_cmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_FMASK_SLICE:
+	case CB_COLOR1_FMASK_SLICE:
+	case CB_COLOR2_FMASK_SLICE:
+	case CB_COLOR3_FMASK_SLICE:
+	case CB_COLOR4_FMASK_SLICE:
+	case CB_COLOR5_FMASK_SLICE:
+	case CB_COLOR6_FMASK_SLICE:
+	case CB_COLOR7_FMASK_SLICE:
+		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_CMASK_SLICE:
+	case CB_COLOR1_CMASK_SLICE:
+	case CB_COLOR2_CMASK_SLICE:
+	case CB_COLOR3_CMASK_SLICE:
+	case CB_COLOR4_CMASK_SLICE:
+	case CB_COLOR5_CMASK_SLICE:
+	case CB_COLOR6_CMASK_SLICE:
+	case CB_COLOR7_CMASK_SLICE:
+		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_BASE:
+	case CB_COLOR9_BASE:
+	case CB_COLOR10_BASE:
+	case CB_COLOR11_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		/* 8x8 only */
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case CB_IMMED0_BASE:
+	case CB_IMMED1_BASE:
+	case CB_IMMED2_BASE:
+	case CB_IMMED3_BASE:
+	case CB_IMMED4_BASE:
+	case CB_IMMED5_BASE:
+	case CB_IMMED6_BASE:
+	case CB_IMMED7_BASE:
+	case CB_IMMED8_BASE:
+	case CB_IMMED9_BASE:
+	case CB_IMMED10_BASE:
+	case CB_IMMED11_BASE:
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_PGM_START_HS:
+	case SQ_PGM_START_LS:
+	case SQ_CONST_MEM_BASE:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+	case SQ_ALU_CONST_CACHE_HS_0:
+	case SQ_ALU_CONST_CACHE_HS_1:
+	case SQ_ALU_CONST_CACHE_HS_2:
+	case SQ_ALU_CONST_CACHE_HS_3:
+	case SQ_ALU_CONST_CACHE_HS_4:
+	case SQ_ALU_CONST_CACHE_HS_5:
+	case SQ_ALU_CONST_CACHE_HS_6:
+	case SQ_ALU_CONST_CACHE_HS_7:
+	case SQ_ALU_CONST_CACHE_HS_8:
+	case SQ_ALU_CONST_CACHE_HS_9:
+	case SQ_ALU_CONST_CACHE_HS_10:
+	case SQ_ALU_CONST_CACHE_HS_11:
+	case SQ_ALU_CONST_CACHE_HS_12:
+	case SQ_ALU_CONST_CACHE_HS_13:
+	case SQ_ALU_CONST_CACHE_HS_14:
+	case SQ_ALU_CONST_CACHE_HS_15:
+	case SQ_ALU_CONST_CACHE_LS_0:
+	case SQ_ALU_CONST_CACHE_LS_1:
+	case SQ_ALU_CONST_CACHE_LS_2:
+	case SQ_ALU_CONST_CACHE_LS_3:
+	case SQ_ALU_CONST_CACHE_LS_4:
+	case SQ_ALU_CONST_CACHE_LS_5:
+	case SQ_ALU_CONST_CACHE_LS_6:
+	case SQ_ALU_CONST_CACHE_LS_7:
+	case SQ_ALU_CONST_CACHE_LS_8:
+	case SQ_ALU_CONST_CACHE_LS_9:
+	case SQ_ALU_CONST_CACHE_LS_10:
+	case SQ_ALU_CONST_CACHE_LS_11:
+	case SQ_ALU_CONST_CACHE_LS_12:
+	case SQ_ALU_CONST_CACHE_LS_13:
+	case SQ_ALU_CONST_CACHE_LS_14:
+	case SQ_ALU_CONST_CACHE_LS_15:
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CAYMAN_SX_SCATTER_EXPORT_BASE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	u32 last_reg, m, i;
+
+	if (p->rdev->family >= CHIP_CAYMAN)
+		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+	else
+		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
+	i = (reg >> 7);
+	if (i >= last_reg) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (p->rdev->family >= CHIP_CAYMAN) {
+		if (!(cayman_reg_safe_bm[i] & m))
+			return true;
+	} else {
+		if (!(evergreen_reg_safe_bm[i] & m))
+			return true;
+	}
+	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	return false;
+}
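+
+/*
+ * Illustrative sketch of how such a safe-register bitmap could be built
+ * (hypothetical helper, not part of this driver); a set bit marks a
+ * register as needing the special handling above:
+ *
+ *	static void mark_reg_unsafe(u32 *bm, u32 reg)
+ *	{
+ *		bm[reg >> 7] |= 1u << ((reg >> 2) & 31);
+ *	}
+ */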
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct evergreen_cs_track *track;
+	volatile u32 *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct evergreen_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (idx_value & 0xfffffff0) +
+		         ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
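+	/*
+	 * Illustrative: the 64-bit predication address is split across two
+	 * dwords, bits [31:4] in ib[idx] (16-byte aligned low dword) and
+	 * bits [39:32] in the low byte of ib[idx + 1], which is why the
+	 * masks 0xfffffff0 and 0xff are applied above before adding the
+	 * buffer object's GPU offset.
+	 */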
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_CLEAR_STATE:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_BASE:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_2:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         radeon_get_ib_value(p, idx+1) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset;
+		ib[idx+2] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DISPATCH_DIRECT:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DISPATCH_DIRECT\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DISPATCH_INDIRECT:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size, info;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		info = radeon_get_ib_value(p, idx+1);
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dword-aligned count */
+			if (size % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			/* GDS is ok */
+			if (((info & 0x60000000) >> 29) != 1) {
+				DRM_ERROR("CP DMA SAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			if (((info & 0x60000000) >> 29) == 0) {
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad CP DMA SRC\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx) +
+					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
+						 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx] = offset;
+				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+			} else if (((info & 0x60000000) >> 29) != 2) {
+				DRM_ERROR("bad CP DMA SRC_SEL\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			/* GDS is ok */
+			if (((info & 0x00300000) >> 20) != 1) {
+				DRM_ERROR("CP DMA DAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			if (((info & 0x00300000) >> 20) == 0) {
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad CP DMA DST\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx+2) +
+					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
+						 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx+2] = offset;
+				ib[idx+3] = upper_32_bits(offset) & 0xff;
+			} else {
+				DRM_ERROR("bad CP DMA DST_SEL\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	}
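+	/*
+	 * Field map assumed by the CP_DMA checks above (illustrative):
+	 *   ib[idx+1] (info)    SRC_SEL in bits [30:29], DST_SEL in [21:20]
+	 *                       (src: 0 = memory, 1 = GDS, 2 = embedded DATA;
+	 *                        dst: 0 = memory, 1 = GDS)
+	 *   ib[idx+4] (command) byte count in bits [20:0] plus the
+	 *                       SAS/DAS/SAIC/DAIC address-space flags.
+	 * E.g. info = 0x40000000 decodes to SRC_SEL = 2.
+	 */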
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_EVENT_WRITE_EOS:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 8) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < (pkt->count / 8); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 toffset, moffset;
+			u32 size, offset, mip_address, tex_dim;
+
+			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (tex)\n");
+					return -EINVAL;
+				}
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					ib[idx+1+(i*8)+1] |=
+						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+						unsigned bankw, bankh, mtaspect, tile_split;
+
+						evergreen_tiling_fields(reloc->lobj.tiling_flags,
+									&bankw, &bankh, &mtaspect,
+									&tile_split);
+						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
+						ib[idx+1+(i*8)+7] |=
+							TEX_BANK_WIDTH(bankw) |
+							TEX_BANK_HEIGHT(bankh) |
+							MACRO_TILE_ASPECT(mtaspect) |
+							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+					}
+				}
+				texture = reloc->robj;
+				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
+				/* tex mip base */
+				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+				mip_address = ib[idx+1+(i*8)+3];
+
+				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+				    !mip_address &&
+				    !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
+					 * It should be 0 if FMASK is disabled. */
+					moffset = 0;
+					mipmap = NULL;
+				} else {
+					r = evergreen_cs_packet_next_reloc(p, &reloc);
+					if (r) {
+						DRM_ERROR("bad SET_RESOURCE (tex)\n");
+						return -EINVAL;
+					}
+					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+					mipmap = reloc->robj;
+				}
+
+				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+				if (r)
+					return r;
+				ib[idx+1+(i*8)+2] += toffset;
+				ib[idx+1+(i*8)+3] += moffset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->lobj.gpu_offset + offset;
+				ib[idx+1+(i*8)+0] = offset64;
+				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
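+	/*
+	 * Illustrative: every SET_RESOURCE descriptor is 8 dwords, so dword
+	 * j of resource i lives at ib[idx + 1 + i*8 + j]; dword 7 carries
+	 * the constant type tested by G__SQ_CONSTANT_TYPE() above, and
+	 * dwords 2 and 3 of a texture resource receive the base and mip
+	 * offsets patched in from the relocations.
+	 */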
+	case PACKET3_SET_ALU_CONST:
+		/* XXX fix me ALU const buffers only */
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
+				  (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->lobj.gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!evergreen_is_safe_reg(p, reg, idx+1))
+				return -EINVAL;
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!evergreen_is_safe_reg(p, reg, idx+3))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct evergreen_cs_track *track;
+	u32 tmp;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize tracker, we are in kms */
+		track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (track == NULL)
+			return -ENOMEM;
+		evergreen_cs_track_init(track);
+		if (p->rdev->family >= CHIP_CAYMAN)
+			tmp = p->rdev->config.cayman.tile_config;
+		else
+			tmp = p->rdev->config.evergreen.tile_config;
+
+		switch (tmp & 0xf) {
+		case 0:
+			track->npipes = 1;
+			break;
+		case 1:
+		default:
+			track->npipes = 2;
+			break;
+		case 2:
+			track->npipes = 4;
+			break;
+		case 3:
+			track->npipes = 8;
+			break;
+		}
+
+		switch ((tmp & 0xf0) >> 4) {
+		case 0:
+			track->nbanks = 4;
+			break;
+		case 1:
+		default:
+			track->nbanks = 8;
+			break;
+		case 2:
+			track->nbanks = 16;
+			break;
+		}
+
+		switch ((tmp & 0xf00) >> 8) {
+		case 0:
+			track->group_size = 256;
+			break;
+		case 1:
+		default:
+			track->group_size = 512;
+			break;
+		}
+
+		switch ((tmp & 0xf000) >> 12) {
+		case 0:
+			track->row_size = 1;
+			break;
+		case 1:
+		default:
+			track->row_size = 2;
+			break;
+		case 2:
+			track->row_size = 4;
+			break;
+		}
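+		/*
+		 * Illustrative decode of the nibble fields above: a
+		 * tile_config of 0x1121 yields npipes = 2 (nibble 0 = 1),
+		 * nbanks = 16 (nibble 1 = 2), group_size = 512 (nibble 2 = 1)
+		 * and row_size = 2 (nibble 3 = 1).
+		 */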
+
+		p->track = track;
+	}
+	do {
+		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			r = evergreen_cs_parse_packet0(p, &pkt);
+			break;
+		case PACKET_TYPE2:
+			break;
+		case PACKET_TYPE3:
+			r = evergreen_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	free(p->track, DRM_MEM_DRIVER);
+	p->track = NULL;
+	return 0;
+}
+
+/*
+ *  DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
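+
+/*
+ * Illustrative decode with the macros above: header 0x00800064 gives
+ * cmd = 0x0, count = 0x64, tiled = 1 (bit 23), new_cmd = 0 and misc = 0.
+ */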
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+	u32 header, cmd, count, tiled, new_cmd, misc;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset, dst2_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+		new_cmd = GET_DMA_NEW(header);
+		misc = GET_DMA_MISC(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 7;
+			} else {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
+					 (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2T, frame to fields */
+						if (idx_value & (1U << 31)) {
+							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset <<= 8;
+						dst2_offset = radeon_get_ib_value(p, idx+2);
+						dst2_offset <<= 8;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					case 1:
+						/* L2T, T2L partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+							return -EINVAL;
+						}
+						/* detile bit */
+						if (idx_value & (1U << 31)) {
+							/* tiled src, linear dst */
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						p->idx += 12;
+						break;
+					case 3:
+						/* L2T, broadcast */
+						if (idx_value & (1U << 31)) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset <<= 8;
+						dst2_offset = radeon_get_ib_value(p, idx+2);
+						dst2_offset <<= 8;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					case 4:
+						/* L2T, T2L */
+						/* detile bit */
+						if (idx_value & (1U << 31)) {
+							/* tiled src, linear dst */
+							src_offset = radeon_get_ib_value(p, idx+1);
+							src_offset <<= 8;
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							dst_offset = radeon_get_ib_value(p, idx+7);
+							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							src_offset = radeon_get_ib_value(p, idx+7);
+							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							dst_offset = radeon_get_ib_value(p, idx+1);
+							dst_offset <<= 8;
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						p->idx += 9;
+						break;
+					case 5:
+						/* T2T partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("T2T Partial is cayman only !\n");
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						p->idx += 13;
+						break;
+					case 7:
+						/* L2T, broadcast */
+						if (idx_value & (1U << 31)) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset <<= 8;
+						dst2_offset = radeon_get_ib_value(p, idx+2);
+						dst2_offset <<= 8;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					switch (misc) {
+					case 0:
+						/* detile bit */
+						if (idx_value & (1U << 31)) {
+							/* tiled src, linear dst */
+							src_offset = radeon_get_ib_value(p, idx+1);
+							src_offset <<= 8;
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							dst_offset = radeon_get_ib_value(p, idx+7);
+							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							src_offset = radeon_get_ib_value(p, idx+7);
+							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							dst_offset = radeon_get_ib_value(p, idx+1);
+							dst_offset <<= 8;
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						p->idx += 9;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				}
+			} else {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2L, byte */
+						src_offset = radeon_get_ib_value(p, idx+2);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+						if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, byte src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + count, radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + count, radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 5;
+						break;
+					case 1:
+						/* L2L, partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("L2L Partial is cayman only !\n");
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+						p->idx += 9;
+						break;
+					case 4:
+						/* L2L, dw, broadcast */
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+						dst2_offset = radeon_get_ib_value(p, idx+2);
+						dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+3);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%ju %lu)\n",
+								 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%ju %lu)\n",
+								 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 7;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					/* L2L, dw */
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%ju %lu)\n",
+							 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+						return -EINVAL;
+					}
+					if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+						dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%ju %lu)\n",
+							 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+						return -EINVAL;
+					}
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					p->idx += 5;
+				}
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
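+			/* address bits 39:32 are carried in bits 23:16 of dword idx+3 */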
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
+					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
+
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case WAIT_UNTIL:
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case CP_COHER_CNTL:
+	case CP_COHER_SIZE:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_COMPUTE_DIM_X:
+	case VGT_COMPUTE_DIM_Y:
+	case VGT_COMPUTE_DIM_Z:
+	case VGT_COMPUTE_START_X:
+	case VGT_COMPUTE_START_Y:
+	case VGT_COMPUTE_START_Z:
+	case VGT_COMPUTE_INDEX:
+	case VGT_COMPUTE_THREAD_GROUP_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+	case SQ_DYN_GPR_SIMD_LOCK_EN:
+	case SQ_CONFIG:
+	case SQ_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+	case SQ_CONST_MEM_BASE:
+	case SQ_STATIC_THREAD_MGMT_1:
+	case SQ_STATIC_THREAD_MGMT_2:
+	case SQ_STATIC_THREAD_MGMT_3:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+	case DB_DEBUG:
+	case DB_DEBUG2:
+	case DB_DEBUG3:
+	case DB_DEBUG4:
+	case DB_WATERMARKS:
+	case TD_PS_BORDER_COLOR_INDEX:
+	case TD_PS_BORDER_COLOR_RED:
+	case TD_PS_BORDER_COLOR_GREEN:
+	case TD_PS_BORDER_COLOR_BLUE:
+	case TD_PS_BORDER_COLOR_ALPHA:
+	case TD_VS_BORDER_COLOR_INDEX:
+	case TD_VS_BORDER_COLOR_RED:
+	case TD_VS_BORDER_COLOR_GREEN:
+	case TD_VS_BORDER_COLOR_BLUE:
+	case TD_VS_BORDER_COLOR_ALPHA:
+	case TD_GS_BORDER_COLOR_INDEX:
+	case TD_GS_BORDER_COLOR_RED:
+	case TD_GS_BORDER_COLOR_GREEN:
+	case TD_GS_BORDER_COLOR_BLUE:
+	case TD_GS_BORDER_COLOR_ALPHA:
+	case TD_HS_BORDER_COLOR_INDEX:
+	case TD_HS_BORDER_COLOR_RED:
+	case TD_HS_BORDER_COLOR_GREEN:
+	case TD_HS_BORDER_COLOR_BLUE:
+	case TD_HS_BORDER_COLOR_ALPHA:
+	case TD_LS_BORDER_COLOR_INDEX:
+	case TD_LS_BORDER_COLOR_RED:
+	case TD_LS_BORDER_COLOR_GREEN:
+	case TD_LS_BORDER_COLOR_BLUE:
+	case TD_LS_BORDER_COLOR_ALPHA:
+	case TD_CS_BORDER_COLOR_INDEX:
+	case TD_CS_BORDER_COLOR_RED:
+	case TD_CS_BORDER_COLOR_GREEN:
+	case TD_CS_BORDER_COLOR_BLUE:
+	case TD_CS_BORDER_COLOR_ALPHA:
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+				      u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_MODE_CONTROL:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_DRAW_INDEX_OFFSET:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDEX:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_BOOL_CONST:
+	case PACKET3_SET_LOOP_CONST:
+	case PACKET3_SET_RESOURCE:
+	case PACKET3_SET_SAMPLER:
+	case PACKET3_SET_CTL_CONST:
+	case PACKET3_SET_RESOURCE_OFFSET:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_RESOURCE_INDIRECT:
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dw-aligned count */
+			if ((command & 0x1fffff) % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case PACKET_TYPE2:
+			idx += 1;
+			break;
+		case PACKET_TYPE3:
+			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
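+			/* a type-3 packet spans its header dword plus count+1 payload dwords */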
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:	radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks it for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	u32 idx = 0;
+	u32 header, cmd, count, tiled, new_cmd, misc;
+
+	do {
+		header = ib->ptr[idx];
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+		new_cmd = GET_DMA_NEW(header);
+		misc = GET_DMA_MISC(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			if (tiled)
+				idx += count + 7;
+			else
+				idx += count + 3;
+			break;
+		case DMA_PACKET_COPY:
+			if (tiled) {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2T, frame to fields */
+						idx += 10;
+						break;
+					case 1:
+						/* L2T, T2L partial */
+						idx += 12;
+						break;
+					case 3:
+						/* L2T, broadcast */
+						idx += 10;
+						break;
+					case 4:
+						/* L2T, T2L */
+						idx += 9;
+						break;
+					case 5:
+						/* T2T partial */
+						idx += 13;
+						break;
+					case 7:
+						/* L2T, broadcast */
+						idx += 10;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					switch (misc) {
+					case 0:
+						idx += 9;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				}
+			} else {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2L, byte */
+						idx += 5;
+						break;
+					case 1:
+						/* L2L, partial */
+						idx += 9;
+						break;
+					case 4:
+						/* L2L, dw, broadcast */
+						idx += 7;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					/* L2L, dw */
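+					/* header + dst lo + src lo + dst hi + src hi */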
+					idx += 5;
+				}
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (idx < ib->length_dw);
+
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_cs.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_hdmi.c
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_hdmi.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_hdmi.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,225 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ *          Rafał Miłecki
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_hdmi.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
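+ *
+ * For reference, the sink regenerates the audio clock from these values as
+ * 128 * fs = f_TMDS * N / CTS; e.g. a 148.5 MHz pixel clock with 48 kHz
+ * audio and the spec-recommended N of 6144 gives
+ * CTS = 148500000 * 6144 / (128 * 48000) = 148500.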
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the checksum for a given info frame
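+ * (the resulting frame[0] makes the byte sum of type + version + length +
+ * frame[0..length] come out to 0 modulo 256, as CEA-861 requires)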
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+					 uint8_t versionNumber,
+					 uint8_t length,
+					 uint8_t *frame)
+{
+	int i;
+	frame[0] = packetType + versionNumber + length;
+	for (i = 1; i <= length; i++)
+		frame[0] += frame[i];
+	frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+	struct drm_encoder *encoder,
+	uint8_t color_format,
+	int active_information_present,
+	uint8_t active_format_aspect_ratio,
+	uint8_t scan_information,
+	uint8_t colorimetry,
+	uint8_t ex_colorimetry,
+	uint8_t quantization,
+	int ITC,
+	uint8_t picture_aspect_ratio,
+	uint8_t video_format_identification,
+	uint8_t pixel_repetition,
+	uint8_t non_uniform_picture_scaling,
+	uint8_t bar_info_data_valid,
+	uint16_t top_bar,
+	uint16_t bottom_bar,
+	uint16_t left_bar,
+	uint16_t right_bar
+)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	uint8_t frame[14];
+
+	frame[0x0] = 0;
+	frame[0x1] =
+		(scan_information & 0x3) |
+		((bar_info_data_valid & 0x3) << 2) |
+		((active_information_present & 0x1) << 4) |
+		((color_format & 0x3) << 5);
+	frame[0x2] =
+		(active_format_aspect_ratio & 0xF) |
+		((picture_aspect_ratio & 0x3) << 4) |
+		((colorimetry & 0x3) << 6);
+	frame[0x3] =
+		(non_uniform_picture_scaling & 0x3) |
+		((quantization & 0x3) << 2) |
+		((ex_colorimetry & 0x7) << 4) |
+		((ITC & 0x1) << 7);
+	frame[0x4] = (video_format_identification & 0x7F);
+	frame[0x5] = (pixel_repetition & 0xF);
+	frame[0x6] = (top_bar & 0xFF);
+	frame[0x7] = (top_bar >> 8);
+	frame[0x8] = (bottom_bar & 0xFF);
+	frame[0x9] = (bottom_bar >> 8);
+	frame[0xA] = (left_bar & 0xFF);
+	frame[0xB] = (left_bar >> 8);
+	frame[0xC] = (right_bar & 0xFF);
+	frame[0xD] = (right_bar >> 8);
+
+	evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright; Intel
+	 * uses the same ones. The checksum function also seems to be OK, as it
+	 * works fine for the audio infoframe. However, the calculated value is
+	 * always lower by 2 than what fglrx produces, which breaks displaying
+	 * anything on TVs that strictly check the checksum. Hack it manually
+	 * here to work around this issue. */
+	frame[0x0] += 2;
+
+	WREG32(AFMT_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(AFMT_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(AFMT_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(AFMT_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	r600_audio_set_clock(encoder, mode->clock);
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND); /* send null packets when required */
+
+	WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+	       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+	       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+	       HDMI_ACR_SOURCE); /* select SW CTS value */
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND | /* send null packets when required */
+	       HDMI_GC_SEND | /* send general control packets */
+	       HDMI_GC_CONT); /* send general control packets every frame */
+
+	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+	       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+	       AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+	       HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+	evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				      0, 0, 0, 0, 0, 0);
+
+	evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+	WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_hdmi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,243 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_reg.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
+#define EVERGREEN_D3VGA_CONTROL                         0x3e0
+#define EVERGREEN_D4VGA_CONTROL                         0x3e4
+#define EVERGREEN_D5VGA_CONTROL                         0x3e8
+#define EVERGREEN_D6VGA_CONTROL                         0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL                         0x414
+#define EVERGREEN_P2PLL_SS_CNTL                         0x454
+#       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL			0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE				0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
+
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE                           0x6800
+#define EVERGREEN_GRPH_CONTROL                          0x6804
+#       define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_DEPTH_8BPP                0
+#       define EVERGREEN_GRPH_DEPTH_16BPP               1
+#       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define EVERGREEN_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#       define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#       define EVERGREEN_GRPH_FORMAT_AI88               3
+#       define EVERGREEN_GRPH_FORMAT_MONO16             4
+#       define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#       define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#       define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#       define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#       define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
+#       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+#       define EVERGREEN_GRPH_RED_CROSSBAR(x)           (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_RED_SEL_R                 0
+#       define EVERGREEN_GRPH_RED_SEL_G                 1
+#       define EVERGREEN_GRPH_RED_SEL_B                 2
+#       define EVERGREEN_GRPH_RED_SEL_A                 3
+#       define EVERGREEN_GRPH_GREEN_CROSSBAR(x)         (((x) & 0x3) << 6)
+#       define EVERGREEN_GRPH_GREEN_SEL_G               0
+#       define EVERGREEN_GRPH_GREEN_SEL_B               1
+#       define EVERGREEN_GRPH_GREEN_SEL_A               2
+#       define EVERGREEN_GRPH_GREEN_SEL_R               3
+#       define EVERGREEN_GRPH_BLUE_CROSSBAR(x)          (((x) & 0x3) << 8)
+#       define EVERGREEN_GRPH_BLUE_SEL_B                0
+#       define EVERGREEN_GRPH_BLUE_SEL_A                1
+#       define EVERGREEN_GRPH_BLUE_SEL_R                2
+#       define EVERGREEN_GRPH_BLUE_SEL_G                3
+#       define EVERGREEN_GRPH_ALPHA_CROSSBAR(x)         (((x) & 0x3) << 10)
+#       define EVERGREEN_GRPH_ALPHA_SEL_A               0
+#       define EVERGREEN_GRPH_ALPHA_SEL_R               1
+#       define EVERGREEN_GRPH_ALPHA_SEL_G               2
+#       define EVERGREEN_GRPH_ALPHA_SEL_B               3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x6814
+#       define EVERGREEN_GRPH_DFQ_ENABLE                (1 << 0)
+#       define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+#define EVERGREEN_GRPH_PITCH                            0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x6828
+#define EVERGREEN_GRPH_X_START                          0x682c
+#define EVERGREEN_GRPH_Y_START                          0x6830
+#define EVERGREEN_GRPH_X_END                            0x6834
+#define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x6998
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x699c
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x69a4
+#define EVERGREEN_CUR_POSITION                          0x69a8
+#define EVERGREEN_CUR_HOT_SPOT                          0x69ac
+#define EVERGREEN_CUR_COLOR1                            0x69b0
+#define EVERGREEN_CUR_COLOR2                            0x69b4
+#define EVERGREEN_CUR_UPDATE                            0x69b8
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE                        0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR                      0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA                       0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR                       0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE              0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL                       0x69fc
+#define EVERGREEN_DC_LUT_CONTROL                        0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x6a18
+
+#define EVERGREEN_DATA_FORMAT                           0x6b00
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT                        0x6b04
+#define EVERGREEN_VLINE_START_END                       0x6b08
+#define EVERGREEN_VLINE_STATUS                          0x6bb8
+#       define EVERGREEN_VLINE_STAT                     (1 << 12)
+
+#define EVERGREEN_VIEWPORT_START                        0x6d70
+#define EVERGREEN_VIEWPORT_SIZE                         0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET                 (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET                 (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET                 (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET                 (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET                 (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x6e34
+#define EVERGREEN_CRTC_CONTROL                          0x6e70
+#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_STATUS                           0x6e8c
+#       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
+
+#define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A                         0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
+
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE				0x7030
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreen_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreen_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreen_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,518 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreen_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned evergreen_reg_safe_bm[2047] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFF0F7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0xCFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFDDEFFF, 0xCF3FFFFF, 0xFFFFF40F,
+	0xFEFFFFDF, 0xFFFFFFFF, 0xFFFFFFEF, 0xEFFFFFFF,
+	0xFFFFF800, 0xFFFFFFFF, 0xFFFFFFFF, 0xBFFFFF07,
+	0xFFFBF0FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFDF, 0xFFFFFFFF, 0xFFFF7FFE, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFB, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFDE, 0xFFFFFFFF,
+	0xFFDF0FFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xC0000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFC3E4, 0xFFFFFFFF, 0x0000FFFF, 0x00000000,
+	0x000CC000, 0x00000000, 0xFFD00000, 0x00000000,
+	0x00000E00, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0xC0000000, 0xFFFFF8FF, 0xFE07FF00,
+	0x3CF1B003, 0xE39E7BCF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xF7020000, 0xDDD89CDD, 0x201FA3FD, 0xFFFFFFF0,
+	0xBFFF0002, 0xEFC3DF87, 0x7BF0F7E1, 0x1EFC3DF8,
+	0xDFBF0F7E, 0xFFFFF7EF, 0xFFFFFFFF, 0x00000000,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xCFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8,
+};
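
For context: evergreen_reg_safe.h is a bitmap with one bit per dword-aligned register, marking which registers a userspace command stream may touch; the long runs of 0xFFFFFFFF cover ranges where every register is permitted. A minimal sketch of how such a table is consulted follows -- the helper name and exact indexing are assumptions modeled on the radeon CS checker, not definitions from this header:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical helper: each 32-bit word of the bitmap covers 32
	 * dword-aligned registers, i.e. 128 bytes of register space. */
	static bool
	reg_is_safe(const uint32_t *bm, size_t nwords, uint32_t reg)
	{
		uint32_t word = reg >> 7;        /* reg / (4 * 32) */
		uint32_t bit  = (reg >> 2) & 31; /* (reg / 4) % 32 */

		if (word >= nwords)
			return (false);
		return ((bm[word] & (1u << bit)) != 0);
	}

A clear bit makes the checker reject or specially validate the write, which is why the handful of non-0xFFFFFFFF words above pinpoint the registers userspace must not write without extra checking.
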


Property changes on: trunk/sys/dev/drm2/radeon/evergreen_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/evergreend.h
===================================================================
--- trunk/sys/dev/drm2/radeon/evergreend.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/evergreend.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2047 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/evergreend.h 261455 2014-02-04 03:36:42Z eadler $");
+
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS           256
+#define EVERGREEN_MAX_TEMP_GPRS         16
+#define EVERGREEN_MAX_SH_THREADS        256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES  4096
+#define EVERGREEN_MAX_FRC_EOV_CNT       16384
+#define EVERGREEN_MAX_BACKENDS          8
+#define EVERGREEN_MAX_BACKENDS_MASK     0xFF
+#define EVERGREEN_MAX_SIMDS             16
+#define EVERGREEN_MAX_SIMDS_MASK        0xFFFF
+#define EVERGREEN_MAX_PIPES             8
+#define EVERGREEN_MAX_PIPES_MASK        0xFF
+#define EVERGREEN_MAX_LDS_NUM           0xFFFF
+
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
+
+/* Registers */
+
+#define RCU_IND_INDEX           			0x100
+#define RCU_IND_DATA            			0x104
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1U << 31)
+#define RLC_GFX_INDEX           			0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define		WRITE_DIS      				(1 << 0)
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x0000000f
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		ROW_SIZE(x)             		((x) << 28)
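
GB_ADDR_CONFIG packs the tiling topology into small fields at the shifts above, which is how the *_GB_ADDR_CONFIG_GOLDEN values decompose. A sketch, assuming each field occupies the nibble at its shift (only NUM_PIPES_MASK is spelled out in this header):

	#include <assert.h>
	#include <stdint.h>

	#define NUM_PIPES(x)               ((x) << 0)
	#define NUM_SHADER_ENGINES(x)      ((x) << 12)
	#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
	#define MULTI_GPU_TILE_SIZE(x)     ((x) << 24)

	int
	main(void)
	{
		/* Rebuild the Cypress golden value from its fields. */
		uint32_t golden = NUM_PIPES(3) |
		    NUM_SHADER_ENGINES(1) |
		    SHADER_ENGINE_TILE_SIZE(1) |
		    MULTI_GPU_TILE_SIZE(2);

		assert(golden == 0x02011003); /* CYPRESS_GB_ADDR_CONFIG_GOLDEN */
		return (0);
	}
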
+#define GB_BACKEND_MAP  				0x98FC
+#define DMIF_ADDR_CONFIG  				0xBD4
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL  					0x2F4C
+#define		HDP_FLUSH_INVALIDATE_CACHE      	(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+
+#define	CP_COHER_CNTL					0x85F0
+#define	CP_COHER_SIZE					0x85F4
+#define	CP_COHER_BASE					0x85F8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1U << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_DEBUG					0xC1FC
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE             0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL         (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE             0x05b0
+#define DCCG_AUDIO_DTO0_MODULE            0x05b4
+#define DCCG_AUDIO_DTO0_LOAD              0x05b8
+#define DCCG_AUDIO_DTO0_CNTL              0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE             0x05c0
+#define DCCG_AUDIO_DTO1_MODULE            0x05c4
+#define DCCG_AUDIO_DTO1_LOAD              0x05c8
+#define DCCG_AUDIO_DTO1_CNTL              0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL                         0x7030
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#       define HDMI_DEEP_COLOR_ENABLE        (1 << 24)
+#       define HDMI_DEEP_COLOR_DEPTH(x)      (((x) & 3) << 28)
+#       define HDMI_24BIT_DEEP_COLOR         0
+#       define HDMI_30BIT_DEEP_COLOR         1
+#       define HDMI_36BIT_DEEP_COLOR         2
+#define HDMI_STATUS                          0x7034
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7038
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x703c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#       define HDMI_ACR_N_MULTIPLE(x)        (((x) & 7) << 16)
+#       define HDMI_ACR_X1                   1
+#       define HDMI_ACR_X2                   2
+#       define HDMI_ACR_X4                   4
+#       define HDMI_ACR_AUDIO_PRIORITY       (1U << 31)
+#define HDMI_VBI_PACKET_CONTROL              0x7040
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7044
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7048
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x704c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7058
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#       define HDMI_GC_AVMUTE_CONT           (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x705c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7084
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7088
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_CN(x)           (((x) & 0x3) << 12)
+#       define AFMT_AVI_INFO_YQ(x)           (((x) & 0x3) << 14)
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x708c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7090
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7094
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7098
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x709c
+#define AFMT_GENERIC0_0                      0x70a0
+#define AFMT_GENERIC0_1                      0x70a4
+#define AFMT_GENERIC0_2                      0x70a8
+#define AFMT_GENERIC0_3                      0x70ac
+#define AFMT_GENERIC0_4                      0x70b0
+#define AFMT_GENERIC0_5                      0x70b4
+#define AFMT_GENERIC0_6                      0x70b8
+#define AFMT_GENERIC1_HDR                    0x70bc
+#define AFMT_GENERIC1_0                      0x70c0
+#define AFMT_GENERIC1_1                      0x70c4
+#define AFMT_GENERIC1_2                      0x70c8
+#define AFMT_GENERIC1_3                      0x70cc
+#define AFMT_GENERIC1_4                      0x70d0
+#define AFMT_GENERIC1_5                      0x70d4
+#define AFMT_GENERIC1_6                      0x70d8
+#define HDMI_ACR_32_0                        0x70dc
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x70e0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x70e4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x70e8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x70ec
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x70f0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0                    0x70f4
+#define HDMI_ACR_STATUS_1                    0x70f8
+#define AFMT_AUDIO_INFO0                     0x70fc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CT(x)         (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#       define AFMT_AUDIO_INFO_CXT(x)        (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1                     0x7100
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#       define AFMT_AUDIO_INFO_LFEBPL(x)     (((x) & 3) << 16)
+#define AFMT_60958_0                         0x7104
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x7108
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x710c
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x7110
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1U << 31)
+#define AFMT_RAMP_CONTROL1                   0x7114
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x7118
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x711c
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x7120
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7128
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AUDIO_HBR_ENABLE         (1 << 8)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x712c
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7130
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x7134
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - afmt regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7138
+
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
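
A descriptor is thus a channel count (minus one) plus a frequency bitmap. An illustrative value -- not taken from this commit -- for an LPCM pin advertising stereo at 32, 44.1 and 48 kHz:

	#include <stdint.h>

	#define MAX_CHANNELS(x)          (((x) & 0x7) << 0)
	#define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)

	/* Stereo = 2 channels, encoded as channels minus one;
	 * bits 0-2 of the bitmap select 32, 44.1 and 48 kHz. */
	static const uint32_t lpcm_desc =
	    MAX_CHANNELS(2 - 1) |
	    SUPPORTED_FREQUENCIES((1 << 0) | (1 << 1) | (1 << 2));
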
+
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1U << 31)
+
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VC					(1 << 13)
+#define		SOFT_RESET_VGT					(1 << 14)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1U << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1U << 31)
+/* evergreen */
+#define	CG_THERMAL_CTRL					0x72c
+#define		TOFFSET_MASK			        0x00003FE0
+#define		TOFFSET_SHIFT			        5
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x07FF0000
+#define		ASIC_T_SHIFT			        16
+#define	CG_TS0_STATUS					0x760
+#define		TS0_ADC_DOUT_MASK			0x000003FF
+#define		TS0_ADC_DOUT_SHIFT			0
+/* APU */
+#define	CG_THERMAL_STATUS			        0x678
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define		BLACKOUT_MODE_MASK			0x00000007
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define	FUS_MC_ARB_RAMCFG				0x2768
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_FUS_VM_FB_OFFSET				0x2898
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+
+#define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
+#define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
+#define	FUS_MC_VM_MD_L1_TLB2_CNTL			0x2664
+
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_ENHANCE					0x8BF0
+#define PA_SC_AA_CONFIG					0x28C04
+#define         MSAA_NUM_SAMPLES_SHIFT                  0
+#define         MSAA_NUM_SAMPLES_MASK                   0x3
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		CS_PRIO(x)					((x) << 18)
+#define		LS_PRIO(x)					((x) << 20)
+#define		HS_PRIO(x)					((x) << 22)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
+#define		NUM_HS_GPRS(x)					((x) << 0)
+#define		NUM_LS_GPRS(x)					((x) << 16)
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_1			0x8C10
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_2			0x8C14
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C18
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+#define	SQ_THREAD_RESOURCE_MGMT_2			0x8C1C
+#define		NUM_HS_THREADS(x)				((x) << 0)
+#define		NUM_LS_THREADS(x)				((x) << 8)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C20
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C24
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_3			0x8C28
+#define		NUM_HS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_LS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define	SQ_DYN_GPR_SIMD_LOCK_EN    			0x8D94
+#define	SQ_STATIC_THREAD_MGMT_1    			0x8E20
+#define	SQ_STATIC_THREAD_MGMT_2    			0x8E24
+#define	SQ_STATIC_THREAD_MGMT_3    			0x8E28
+#define	SQ_LDS_RESOURCE_MGMT    			0x8E2C
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_MISC						0x28350
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
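
As a worked example of these bits -- mirroring how evergreen GART setup typically programs context 0, though the exact call is not part of this header:

	#include <stdint.h>

	#define ENABLE_CONTEXT                        (1 << 0)
	#define PAGE_TABLE_DEPTH(x)                   (((x) & 3) << 1)
	#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)

	/* Enable GPUVM context 0 with a flat (depth-0) page table and
	 * fault unmapped accesses to the default address. */
	static const uint32_t vm_ctx0_cntl = ENABLE_CONTEXT |
	    PAGE_TABLE_DEPTH(0) |
	    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT;
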
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+
+#define	WAIT_UNTIL					0x8040
+
+#define	SRBM_STATUS				        0x0E50
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SRBM_SOFT_RESET_ALL_MASK    	       	0x00FEEFA6
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+/* display watermarks */
+#define	DC_LB_MEMORY_SPLIT				  0x6b0c
+#define	PRIORITY_A_CNT			                  0x6b18
+#define		PRIORITY_MARK_MASK			  0x7fff
+#define		PRIORITY_OFF				  (1 << 16)
+#define		PRIORITY_ALWAYS_ON			  (1 << 20)
+#define	PRIORITY_B_CNT			                  0x6b1c
+#define	PIPE0_ARBITRATION_CONTROL3			  0x0bf0
+#       define LATENCY_WATERMARK_MASK(x)                  ((x) << 16)
+#define	PIPE0_LATENCY_CONTROL			          0x0bf4
+#       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1U << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
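
The IH size field takes a log2 dword count, so ring sizes must be powers of two. A hypothetical value for a 64 KB interrupt ring; the particular combination of flags is illustrative, not taken from this commit:

	#include <stdint.h>

	#define IH_RB_ENABLE            (1 << 0)
	#define IH_IB_SIZE(x)           ((x) << 1)  /* log2 of size in dwords */
	#define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
	#define IH_WPTR_OVERFLOW_CLEAR  (1U << 31)

	/* 64 KB ring: 65536 / 4 = 16384 dwords, log2(16384) = 14. */
	static const uint32_t ih_rb_cntl = IH_RB_ENABLE |
	    IH_IB_SIZE(14) |
	    IH_WPTR_OVERFLOW_ENABLE |
	    IH_WPTR_OVERFLOW_CLEAR;
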
+
+#define CP_INT_CNTL                                     0xc124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define SCRATCH_INT_ENABLE                       (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define IB2_INT_ENABLE                           (1 << 29)
+#       define IB1_INT_ENABLE                           (1 << 30)
+#       define RB_INT_ENABLE                            (1U << 31)
+#define CP_INT_STATUS                                   0xc128
+#       define SCRATCH_INT_STAT                         (1 << 25)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define IB2_INT_STAT                             (1 << 29)
+#       define IB1_INT_STAT                             (1 << 30)
+#       define RB_INT_STAT                              (1U << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DACB_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
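+
+/*
+ * Illustrative sketch, not part of the upstream header: a driver would
+ * typically compose one DC_HPDx_CONTROL write from the fields above,
+ * e.g. enabling a pad with a connection timer and an RX interrupt
+ * timer (the timer values here are driver policy, shown only for
+ * orientation):
+ *
+ *	tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ *	      DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ */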
+
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                 0xd0b8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
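+
+/*
+ * Illustrative sketch, not part of the upstream header: DMA_PACKET()
+ * packs the 4-bit command, two flag bits and a 20-bit count into a
+ * single header dword, so a copy header covering ndw dwords ("ndw" is
+ * a hypothetical variable and must fit in 20 bits) would be
+ *
+ *	hdr = DMA_PACKET(DMA_PACKET_COPY, 0, 0, ndw);
+ */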
+
+/* PCIe link registers */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
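+
+/*
+ * Illustrative sketch, not part of the upstream header: the PM4
+ * builders above put the packet type in bits [31:30]; PACKET0 carries
+ * the register offset divided by four in [15:0], PACKET3 carries the
+ * opcode in [15:8], and both keep the payload count in [29:16] (by
+ * convention the number of payload dwords minus one).  For example,
+ *
+ *	hdr = PACKET3(PACKET3_NOP, 0);
+ *
+ * builds a type-3 NOP with one payload dword; CP_PACKET_GET_TYPE(hdr)
+ * then yields 3 and CP_PACKET3_GET_OPCODE(hdr) yields PACKET3_NOP.
+ * REG_SET(), used by PACKET2(), is assumed to be the usual radeon
+ * field-packing helper defined elsewhere in the driver headers.
+ */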
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1U << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
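+
+/*
+ * Illustrative sketch, not part of the upstream header: per the layout
+ * comment above, dword 3 of a CP_DMA packet combines these selectors
+ * with the top byte of the source address ("src_hi" below is a
+ * hypothetical variable), e.g. a synchronized memory-to-memory copy
+ * issued from the ME:
+ *
+ *	w3 = PACKET3_CP_DMA_CP_SYNC | PACKET3_CP_DMA_SRC_SEL(0) |
+ *	     PACKET3_CP_DMA_ENGINE(0) | PACKET3_CP_DMA_DST_SEL(0) |
+ *	     ((src_hi) & 0xff);
+ */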
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
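+
+/*
+ * Illustrative sketch, not part of the upstream header: the flags
+ * above form the coherency-control dword of a SURFACE_SYNC packet; a
+ * texture and shader cache flush, for instance, would OR together
+ * PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA.  Which caches to
+ * flush, and when, is driver policy rather than anything this header
+ * dictates.
+ */
+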
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_RB_OFFSET				0x4B
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE			0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE			0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT				0x75
+
+#define	SQ_RESOURCE_CONSTANT_WORD7_0				0x3001c
+#define		S__SQ_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
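+
+/*
+ * Illustrative sketch, not part of the upstream header: bits [31:30]
+ * of resource word 7 tag a constant as a texture or vertex buffer,
+ * e.g.
+ *
+ *	w7 |= S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
+ *
+ * with G__SQ_CONSTANT_TYPE() extracting the same field for validation.
+ */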
+
+#define VGT_VTX_VECT_EJECT_REG				0x88b0
+
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define SQ_ESGS_RING_BASE				0x8c40
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_BASE				0x8c48
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define VGT_TF_RING_SIZE				0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE				0x28900
+#define SQ_GSVS_RING_ITEMSIZE				0x28904
+#define SQ_ESTMP_RING_ITEMSIZE				0x28908
+#define SQ_GSTMP_RING_ITEMSIZE				0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE				0x28910
+#define SQ_PSTMP_RING_ITEMSIZE				0x28914
+#define SQ_LSTMP_RING_ITEMSIZE				0x28830
+#define SQ_HSTMP_RING_ITEMSIZE				0x28834
+
+#define SQ_GS_VERT_ITEMSIZE				0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1				0x28920
+#define SQ_GS_VERT_ITEMSIZE_2				0x28924
+#define SQ_GS_VERT_ITEMSIZE_3				0x28928
+#define SQ_GSVS_RING_OFFSET_1				0x2892c
+#define SQ_GSVS_RING_OFFSET_2				0x28930
+#define SQ_GSVS_RING_OFFSET_3				0x28934
+
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0			0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0			0x28f80
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0				0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1				0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2				0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3				0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4				0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5				0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6				0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7				0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8				0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9				0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10			0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11			0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12			0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13			0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14			0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15			0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0				0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1				0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2				0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3				0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4				0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5				0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6				0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7				0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8				0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9				0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10			0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11			0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12			0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13			0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14			0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15			0x28f7c
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define VGT_INDEX_TYPE                                  0x895C
+
+#define VGT_NUM_INDICES                                 0x8970
+
+#define VGT_COMPUTE_DIM_X                               0x8990
+#define VGT_COMPUTE_DIM_Y                               0x8994
+#define VGT_COMPUTE_DIM_Z                               0x8998
+#define VGT_COMPUTE_START_X                             0x899C
+#define VGT_COMPUTE_START_Y                             0x89A0
+#define VGT_COMPUTE_START_Z                             0x89A4
+#define VGT_COMPUTE_INDEX                               0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
+#define VGT_HS_OFFCHIP_PARAM                            0x89B0
+
+#define DB_DEBUG					0x9830
+#define DB_DEBUG2					0x9834
+#define DB_DEBUG3					0x9838
+#define DB_DEBUG4					0x983C
+#define DB_WATERMARKS					0x9854
+#define DB_DEPTH_CONTROL				0x28800
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define     V_028800_STENCILFUNC_NEVER                 0x00000000
+#define     V_028800_STENCILFUNC_LESS                  0x00000001
+#define     V_028800_STENCILFUNC_EQUAL                 0x00000002
+#define     V_028800_STENCILFUNC_LEQUAL                0x00000003
+#define     V_028800_STENCILFUNC_GREATER               0x00000004
+#define     V_028800_STENCILFUNC_NOTEQUAL              0x00000005
+#define     V_028800_STENCILFUNC_GEQUAL                0x00000006
+#define     V_028800_STENCILFUNC_ALWAYS                0x00000007
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define     V_028800_STENCIL_KEEP                      0x00000000
+#define     V_028800_STENCIL_ZERO                      0x00000001
+#define     V_028800_STENCIL_REPLACE                   0x00000002
+#define     V_028800_STENCIL_INCR                      0x00000003
+#define     V_028800_STENCIL_DECR                      0x00000004
+#define     V_028800_STENCIL_INVERT                    0x00000005
+#define     V_028800_STENCIL_INCR_WRAP                 0x00000006
+#define     V_028800_STENCIL_DECR_WRAP                 0x00000007
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
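+
+/*
+ * Illustrative sketch, not part of the upstream header: the S_/G_/C_
+ * triplets used throughout this file follow one convention: S_ shifts
+ * a value into its field, G_ extracts it, and C_ is the AND-mask that
+ * clears the field.  A read-modify-write of ZFUNC would therefore be
+ *
+ *	reg = (reg & C_028800_ZFUNC) | S_028800_ZFUNC(zfunc);
+ *
+ * with the old value recovered by G_028800_ZFUNC(reg).
+ */
+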
+#define DB_DEPTH_VIEW					0x28008
+#define R_028008_DB_DEPTH_VIEW                       0x00028008
+#define   S_028008_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028008_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028008_SLICE_START                         0xFFFFF800
+#define   S_028008_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028008_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028008_SLICE_MAX                           0xFF001FFF
+#define DB_HTILE_DATA_BASE				0x28014
+#define DB_HTILE_SURFACE				0x28abc
+#define   S_028ABC_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028ABC_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028ABC_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028ABC_HTILE_HEIGHT(x)                     (((x) & 0x1) << 1)
+#define   G_028ABC_HTILE_HEIGHT(x)                     (((x) >> 1) & 0x1)
+#define   C_028ABC_HTILE_HEIGHT                        0xFFFFFFFD
+#define   G_028ABC_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define DB_Z_INFO					0x28040
+#       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
+#       define DB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 24)
+#define R_028040_DB_Z_INFO                       0x028040
+#define   S_028040_FORMAT(x)                           (((x) & 0x3) << 0)
+#define   G_028040_FORMAT(x)                           (((x) >> 0) & 0x3)
+#define   C_028040_FORMAT                              0xFFFFFFFC
+#define     V_028040_Z_INVALID                     0x00000000
+#define     V_028040_Z_16                          0x00000001
+#define     V_028040_Z_24                          0x00000002
+#define     V_028040_Z_32_FLOAT                    0x00000003
+#define   S_028040_ARRAY_MODE(x)                       (((x) & 0xF) << 4)
+#define   G_028040_ARRAY_MODE(x)                       (((x) >> 4) & 0xF)
+#define   C_028040_ARRAY_MODE                          0xFFFFFF0F
+#define   S_028040_READ_SIZE(x)                        (((x) & 0x1) << 28)
+#define   G_028040_READ_SIZE(x)                        (((x) >> 28) & 0x1)
+#define   C_028040_READ_SIZE                           0xEFFFFFFF
+#define   S_028040_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 29)
+#define   G_028040_TILE_SURFACE_ENABLE(x)              (((x) >> 29) & 0x1)
+#define   C_028040_TILE_SURFACE_ENABLE                 0xDFFFFFFF
+#define   S_028040_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028040_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028040_ZRANGE_PRECISION                    0x7FFFFFFF
+#define   S_028040_TILE_SPLIT(x)                       (((x) & 0x7) << 8)
+#define   G_028040_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define   S_028040_NUM_BANKS(x)                        (((x) & 0x3) << 12)
+#define   G_028040_NUM_BANKS(x)                        (((x) >> 12) & 0x3)
+#define   S_028040_BANK_WIDTH(x)                       (((x) & 0x3) << 16)
+#define   G_028040_BANK_WIDTH(x)                       (((x) >> 16) & 0x3)
+#define   S_028040_BANK_HEIGHT(x)                      (((x) & 0x3) << 20)
+#define   G_028040_BANK_HEIGHT(x)                      (((x) >> 20) & 0x3)
+#define   S_028040_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 24)
+#define   G_028040_MACRO_TILE_ASPECT(x)                (((x) >> 24) & 0x3)
+#define DB_STENCIL_INFO					0x28044
+#define R_028044_DB_STENCIL_INFO                     0x028044
+#define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
+#define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
+#define   C_028044_FORMAT                              0xFFFFFFFE
+#define	    V_028044_STENCIL_INVALID			0
+#define	    V_028044_STENCIL_8				1
+#define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define DB_Z_READ_BASE					0x28048
+#define DB_STENCIL_READ_BASE				0x2804c
+#define DB_Z_WRITE_BASE					0x28050
+#define DB_STENCIL_WRITE_BASE				0x28054
+#define DB_DEPTH_SIZE					0x28058
+#define R_028058_DB_DEPTH_SIZE                       0x028058
+#define   S_028058_PITCH_TILE_MAX(x)                   (((x) & 0x7FF) << 0)
+#define   G_028058_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x7FF)
+#define   C_028058_PITCH_TILE_MAX                      0xFFFFF800
+#define   S_028058_HEIGHT_TILE_MAX(x)                  (((x) & 0x7FF) << 11)
+#define   G_028058_HEIGHT_TILE_MAX(x)                  (((x) >> 11) & 0x7FF)
+#define   C_028058_HEIGHT_TILE_MAX                     0xFFC007FF
+#define R_02805C_DB_DEPTH_SLICE                      0x02805C
+#define   S_02805C_SLICE_TILE_MAX(x)                   (((x) & 0x3FFFFF) << 0)
+#define   G_02805C_SLICE_TILE_MAX(x)                   (((x) >> 0) & 0x3FFFFF)
+#define   C_02805C_SLICE_TILE_MAX                      0xFFC00000
+
+#define SQ_PGM_START_PS					0x28840
+#define SQ_PGM_START_VS					0x2885c
+#define SQ_PGM_START_GS					0x28874
+#define SQ_PGM_START_ES					0x2888c
+#define SQ_PGM_START_FS					0x288a4
+#define SQ_PGM_START_HS					0x288b8
+#define SQ_PGM_START_LS					0x288d0
+
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+#define VGT_STRMOUT_CONFIG				0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG			0x28b98
+
+#define CB_TARGET_MASK					0x28238
+#define CB_SHADER_MASK					0x2823c
+
+#define GDS_ADDR_BASE					0x28720
+
+#define	CB_IMMED0_BASE					0x28b9c
+#define	CB_IMMED1_BASE					0x28ba0
+#define	CB_IMMED2_BASE					0x28ba4
+#define	CB_IMMED3_BASE					0x28ba8
+#define	CB_IMMED4_BASE					0x28bac
+#define	CB_IMMED5_BASE					0x28bb0
+#define	CB_IMMED6_BASE					0x28bb4
+#define	CB_IMMED7_BASE					0x28bb8
+#define	CB_IMMED8_BASE					0x28bbc
+#define	CB_IMMED9_BASE					0x28bc0
+#define	CB_IMMED10_BASE					0x28bc4
+#define	CB_IMMED11_BASE					0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define	CB_COLOR0_BASE					0x28c60
+#define	CB_COLOR0_PITCH					0x28c64
+#define	CB_COLOR0_SLICE					0x28c68
+#define	CB_COLOR0_VIEW					0x28c6c
+#define R_028C6C_CB_COLOR0_VIEW                      0x00028C6C
+#define   S_028C6C_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028C6C_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028C6C_SLICE_START                         0xFFFFF800
+#define   S_028C6C_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028C6C_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028C6C_SLICE_MAX                           0xFF001FFF
+#define R_028C70_CB_COLOR0_INFO                      0x028C70
+#define   S_028C70_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_028C70_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_028C70_ENDIAN                              0xFFFFFFFC
+#define   S_028C70_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_028C70_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_028C70_FORMAT                              0xFFFFFF03
+#define     V_028C70_COLOR_INVALID                     0x00000000
+#define     V_028C70_COLOR_8                           0x00000001
+#define     V_028C70_COLOR_4_4                         0x00000002
+#define     V_028C70_COLOR_3_3_2                       0x00000003
+#define     V_028C70_COLOR_16                          0x00000005
+#define     V_028C70_COLOR_16_FLOAT                    0x00000006
+#define     V_028C70_COLOR_8_8                         0x00000007
+#define     V_028C70_COLOR_5_6_5                       0x00000008
+#define     V_028C70_COLOR_6_5_5                       0x00000009
+#define     V_028C70_COLOR_1_5_5_5                     0x0000000A
+#define     V_028C70_COLOR_4_4_4_4                     0x0000000B
+#define     V_028C70_COLOR_5_5_5_1                     0x0000000C
+#define     V_028C70_COLOR_32                          0x0000000D
+#define     V_028C70_COLOR_32_FLOAT                    0x0000000E
+#define     V_028C70_COLOR_16_16                       0x0000000F
+#define     V_028C70_COLOR_16_16_FLOAT                 0x00000010
+#define     V_028C70_COLOR_8_24                        0x00000011
+#define     V_028C70_COLOR_8_24_FLOAT                  0x00000012
+#define     V_028C70_COLOR_24_8                        0x00000013
+#define     V_028C70_COLOR_24_8_FLOAT                  0x00000014
+#define     V_028C70_COLOR_10_11_11                    0x00000015
+#define     V_028C70_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_028C70_COLOR_11_11_10                    0x00000017
+#define     V_028C70_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_028C70_COLOR_2_10_10_10                  0x00000019
+#define     V_028C70_COLOR_8_8_8_8                     0x0000001A
+#define     V_028C70_COLOR_10_10_10_2                  0x0000001B
+#define     V_028C70_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_028C70_COLOR_32_32                       0x0000001D
+#define     V_028C70_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_028C70_COLOR_16_16_16_16                 0x0000001F
+#define     V_028C70_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_028C70_COLOR_32_32_32_32                 0x00000022
+#define     V_028C70_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_028C70_COLOR_32_32_32_FLOAT              0x00000030
+#define   S_028C70_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_028C70_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_028C70_ARRAY_MODE                          0xFFFFF0FF
+#define     V_028C70_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_028C70_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_028C70_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028C70_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028C70_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_028C70_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_028C70_NUMBER_TYPE                         0xFFFF8FFF
+#define     V_028C70_NUMBER_UNORM                      0x00000000
+#define     V_028C70_NUMBER_SNORM                      0x00000001
+#define     V_028C70_NUMBER_USCALED                    0x00000002
+#define     V_028C70_NUMBER_SSCALED                    0x00000003
+#define     V_028C70_NUMBER_UINT                       0x00000004
+#define     V_028C70_NUMBER_SINT                       0x00000005
+#define     V_028C70_NUMBER_SRGB                       0x00000006
+#define     V_028C70_NUMBER_FLOAT                      0x00000007
+#define   S_028C70_COMP_SWAP(x)                        (((x) & 0x3) << 15)
+#define   G_028C70_COMP_SWAP(x)                        (((x) >> 15) & 0x3)
+#define   C_028C70_COMP_SWAP                           0xFFFE7FFF
+#define     V_028C70_SWAP_STD                          0x00000000
+#define     V_028C70_SWAP_ALT                          0x00000001
+#define     V_028C70_SWAP_STD_REV                      0x00000002
+#define     V_028C70_SWAP_ALT_REV                      0x00000003
+#define   S_028C70_FAST_CLEAR(x)                       (((x) & 0x1) << 17)
+#define   G_028C70_FAST_CLEAR(x)                       (((x) >> 17) & 0x1)
+#define   C_028C70_FAST_CLEAR                          0xFFFDFFFF
+#define   S_028C70_COMPRESSION(x)                      (((x) & 0x3) << 18)
+#define   G_028C70_COMPRESSION(x)                      (((x) >> 18) & 0x3)
+#define   C_028C70_COMPRESSION                         0xFFF3FFFF
+#define   S_028C70_BLEND_CLAMP(x)                      (((x) & 0x1) << 19)
+#define   G_028C70_BLEND_CLAMP(x)                      (((x) >> 19) & 0x1)
+#define   C_028C70_BLEND_CLAMP                         0xFFF7FFFF
+#define   S_028C70_BLEND_BYPASS(x)                     (((x) & 0x1) << 20)
+#define   G_028C70_BLEND_BYPASS(x)                     (((x) >> 20) & 0x1)
+#define   C_028C70_BLEND_BYPASS                        0xFFEFFFFF
+#define   S_028C70_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 21)
+#define   G_028C70_SIMPLE_FLOAT(x)                     (((x) >> 21) & 0x1)
+#define   C_028C70_SIMPLE_FLOAT                        0xFFDFFFFF
+#define   S_028C70_ROUND_MODE(x)                       (((x) & 0x1) << 22)
+#define   G_028C70_ROUND_MODE(x)                       (((x) >> 22) & 0x1)
+#define   C_028C70_ROUND_MODE                          0xFFBFFFFF
+#define   S_028C70_TILE_COMPACT(x)                     (((x) & 0x1) << 23)
+#define   G_028C70_TILE_COMPACT(x)                     (((x) >> 23) & 0x1)
+#define   C_028C70_TILE_COMPACT                        0xFF7FFFFF
+#define   S_028C70_SOURCE_FORMAT(x)                    (((x) & 0x3) << 24)
+#define   G_028C70_SOURCE_FORMAT(x)                    (((x) >> 24) & 0x3)
+#define   C_028C70_SOURCE_FORMAT                       0xFCFFFFFF
+#define     V_028C70_EXPORT_4C_32BPC                   0x0
+#define     V_028C70_EXPORT_4C_16BPC                   0x1
+#define     V_028C70_EXPORT_2C_32BPC                   0x2 /* Do not use */
+#define   S_028C70_RAT(x)                              (((x) & 0x1) << 26)
+#define   G_028C70_RAT(x)                              (((x) >> 26) & 0x1)
+#define   C_028C70_RAT                                 0xFBFFFFFF
+#define   S_028C70_RESOURCE_TYPE(x)                    (((x) & 0x7) << 27)
+#define   G_028C70_RESOURCE_TYPE(x)                    (((x) >> 27) & 0x7)
+#define   C_028C70_RESOURCE_TYPE                       0xC7FFFFFF
+
+#define	CB_COLOR0_INFO					0x28c70
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#       define ARRAY_LINEAR_GENERAL                     0
+#       define ARRAY_LINEAR_ALIGNED                     1
+#       define ARRAY_1D_TILED_THIN1                     2
+#       define ARRAY_2D_TILED_THIN1                     4
+#	define CB_SOURCE_FORMAT(x)			((x) << 24)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
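+
+/*
+ * Illustrative sketch, not part of the upstream header: the shorthand
+ * helpers above compose a CB_COLORn_INFO value without spelling out
+ * the S_028C70_* forms, e.g. a linear 8:8:8:8 target exporting at
+ * full precision:
+ *
+ *	info = CB_FORMAT(V_028C70_COLOR_8_8_8_8) |
+ *	       CB_ARRAY_MODE(ARRAY_LINEAR_GENERAL) |
+ *	       CB_SOURCE_FORMAT(CB_SF_EXPORT_FULL);
+ */
+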
+#define R_028C74_CB_COLOR0_ATTRIB                      0x028C74
+#define   S_028C74_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 4)
+#define   G_028C74_NON_DISP_TILING_ORDER(x)            (((x) >> 4) & 0x1)
+#define   C_028C74_NON_DISP_TILING_ORDER               0xFFFFFFEF
+#define   S_028C74_TILE_SPLIT(x)                       (((x) & 0xf) << 5)
+#define   G_028C74_TILE_SPLIT(x)                       (((x) >> 5) & 0xf)
+#define   S_028C74_NUM_BANKS(x)                        (((x) & 0x3) << 10)
+#define   G_028C74_NUM_BANKS(x)                        (((x) >> 10) & 0x3)
+#define   S_028C74_BANK_WIDTH(x)                       (((x) & 0x3) << 13)
+#define   G_028C74_BANK_WIDTH(x)                       (((x) >> 13) & 0x3)
+#define   S_028C74_BANK_HEIGHT(x)                      (((x) & 0x3) << 16)
+#define   G_028C74_BANK_HEIGHT(x)                      (((x) >> 16) & 0x3)
+#define   S_028C74_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 19)
+#define   G_028C74_MACRO_TILE_ASPECT(x)                (((x) >> 19) & 0x3)
+#define	CB_COLOR0_ATTRIB				0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
+#       define CB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 19)
+#define	CB_COLOR0_DIM					0x28c78
+/* only CB0-7 blocks have these regs */
+#define	CB_COLOR0_CMASK					0x28c7c
+#define	CB_COLOR0_CMASK_SLICE				0x28c80
+#define	CB_COLOR0_FMASK					0x28c84
+#define	CB_COLOR0_FMASK_SLICE				0x28c88
+#define	CB_COLOR0_CLEAR_WORD0				0x28c8c
+#define	CB_COLOR0_CLEAR_WORD1				0x28c90
+#define	CB_COLOR0_CLEAR_WORD2				0x28c94
+#define	CB_COLOR0_CLEAR_WORD3				0x28c98
+
+#define	CB_COLOR1_BASE					0x28c9c
+#define	CB_COLOR2_BASE					0x28cd8
+#define	CB_COLOR3_BASE					0x28d14
+#define	CB_COLOR4_BASE					0x28d50
+#define	CB_COLOR5_BASE					0x28d8c
+#define	CB_COLOR6_BASE					0x28dc8
+#define	CB_COLOR7_BASE					0x28e04
+#define	CB_COLOR8_BASE					0x28e40
+#define	CB_COLOR9_BASE					0x28e5c
+#define	CB_COLOR10_BASE					0x28e78
+#define	CB_COLOR11_BASE					0x28e94
+
+#define	CB_COLOR1_PITCH					0x28ca0
+#define	CB_COLOR2_PITCH					0x28cdc
+#define	CB_COLOR3_PITCH					0x28d18
+#define	CB_COLOR4_PITCH					0x28d54
+#define	CB_COLOR5_PITCH					0x28d90
+#define	CB_COLOR6_PITCH					0x28dcc
+#define	CB_COLOR7_PITCH					0x28e08
+#define	CB_COLOR8_PITCH					0x28e44
+#define	CB_COLOR9_PITCH					0x28e60
+#define	CB_COLOR10_PITCH				0x28e7c
+#define	CB_COLOR11_PITCH				0x28e98
+
+#define	CB_COLOR1_SLICE					0x28ca4
+#define	CB_COLOR2_SLICE					0x28ce0
+#define	CB_COLOR3_SLICE					0x28d1c
+#define	CB_COLOR4_SLICE					0x28d58
+#define	CB_COLOR5_SLICE					0x28d94
+#define	CB_COLOR6_SLICE					0x28dd0
+#define	CB_COLOR7_SLICE					0x28e0c
+#define	CB_COLOR8_SLICE					0x28e48
+#define	CB_COLOR9_SLICE					0x28e64
+#define	CB_COLOR10_SLICE				0x28e80
+#define	CB_COLOR11_SLICE				0x28e9c
+
+#define	CB_COLOR1_VIEW					0x28ca8
+#define	CB_COLOR2_VIEW					0x28ce4
+#define	CB_COLOR3_VIEW					0x28d20
+#define	CB_COLOR4_VIEW					0x28d5c
+#define	CB_COLOR5_VIEW					0x28d98
+#define	CB_COLOR6_VIEW					0x28dd4
+#define	CB_COLOR7_VIEW					0x28e10
+#define	CB_COLOR8_VIEW					0x28e4c
+#define	CB_COLOR9_VIEW					0x28e68
+#define	CB_COLOR10_VIEW					0x28e84
+#define	CB_COLOR11_VIEW					0x28ea0
+
+#define	CB_COLOR1_INFO					0x28cac
+#define	CB_COLOR2_INFO					0x28ce8
+#define	CB_COLOR3_INFO					0x28d24
+#define	CB_COLOR4_INFO					0x28d60
+#define	CB_COLOR5_INFO					0x28d9c
+#define	CB_COLOR6_INFO					0x28dd8
+#define	CB_COLOR7_INFO					0x28e14
+#define	CB_COLOR8_INFO					0x28e50
+#define	CB_COLOR9_INFO					0x28e6c
+#define	CB_COLOR10_INFO					0x28e88
+#define	CB_COLOR11_INFO					0x28ea4
+
+#define	CB_COLOR1_ATTRIB				0x28cb0
+#define	CB_COLOR2_ATTRIB				0x28cec
+#define	CB_COLOR3_ATTRIB				0x28d28
+#define	CB_COLOR4_ATTRIB				0x28d64
+#define	CB_COLOR5_ATTRIB				0x28da0
+#define	CB_COLOR6_ATTRIB				0x28ddc
+#define	CB_COLOR7_ATTRIB				0x28e18
+#define	CB_COLOR8_ATTRIB				0x28e54
+#define	CB_COLOR9_ATTRIB				0x28e70
+#define	CB_COLOR10_ATTRIB				0x28e8c
+#define	CB_COLOR11_ATTRIB				0x28ea8
+
+#define	CB_COLOR1_DIM					0x28cb4
+#define	CB_COLOR2_DIM					0x28cf0
+#define	CB_COLOR3_DIM					0x28d2c
+#define	CB_COLOR4_DIM					0x28d68
+#define	CB_COLOR5_DIM					0x28da4
+#define	CB_COLOR6_DIM					0x28de0
+#define	CB_COLOR7_DIM					0x28e1c
+#define	CB_COLOR8_DIM					0x28e58
+#define	CB_COLOR9_DIM					0x28e74
+#define	CB_COLOR10_DIM					0x28e90
+#define	CB_COLOR11_DIM					0x28eac
+
+#define	CB_COLOR1_CMASK					0x28cb8
+#define	CB_COLOR2_CMASK					0x28cf4
+#define	CB_COLOR3_CMASK					0x28d30
+#define	CB_COLOR4_CMASK					0x28d6c
+#define	CB_COLOR5_CMASK					0x28da8
+#define	CB_COLOR6_CMASK					0x28de4
+#define	CB_COLOR7_CMASK					0x28e20
+
+#define	CB_COLOR1_CMASK_SLICE				0x28cbc
+#define	CB_COLOR2_CMASK_SLICE				0x28cf8
+#define	CB_COLOR3_CMASK_SLICE				0x28d34
+#define	CB_COLOR4_CMASK_SLICE				0x28d70
+#define	CB_COLOR5_CMASK_SLICE				0x28dac
+#define	CB_COLOR6_CMASK_SLICE				0x28de8
+#define	CB_COLOR7_CMASK_SLICE				0x28e24
+
+#define	CB_COLOR1_FMASK					0x28cc0
+#define	CB_COLOR2_FMASK					0x28cfc
+#define	CB_COLOR3_FMASK					0x28d38
+#define	CB_COLOR4_FMASK					0x28d74
+#define	CB_COLOR5_FMASK					0x28db0
+#define	CB_COLOR6_FMASK					0x28dec
+#define	CB_COLOR7_FMASK					0x28e28
+
+#define	CB_COLOR1_FMASK_SLICE				0x28cc4
+#define	CB_COLOR2_FMASK_SLICE				0x28d00
+#define	CB_COLOR3_FMASK_SLICE				0x28d3c
+#define	CB_COLOR4_FMASK_SLICE				0x28d78
+#define	CB_COLOR5_FMASK_SLICE				0x28db4
+#define	CB_COLOR6_FMASK_SLICE				0x28df0
+#define	CB_COLOR7_FMASK_SLICE				0x28e2c
+
+#define	CB_COLOR1_CLEAR_WORD0				0x28cc8
+#define	CB_COLOR2_CLEAR_WORD0				0x28d04
+#define	CB_COLOR3_CLEAR_WORD0				0x28d40
+#define	CB_COLOR4_CLEAR_WORD0				0x28d7c
+#define	CB_COLOR5_CLEAR_WORD0				0x28db8
+#define	CB_COLOR6_CLEAR_WORD0				0x28df4
+#define	CB_COLOR7_CLEAR_WORD0				0x28e30
+
+#define	CB_COLOR1_CLEAR_WORD1				0x28ccc
+#define	CB_COLOR2_CLEAR_WORD1				0x28d08
+#define	CB_COLOR3_CLEAR_WORD1				0x28d44
+#define	CB_COLOR4_CLEAR_WORD1				0x28d80
+#define	CB_COLOR5_CLEAR_WORD1				0x28dbc
+#define	CB_COLOR6_CLEAR_WORD1				0x28df8
+#define	CB_COLOR7_CLEAR_WORD1				0x28e34
+
+#define	CB_COLOR1_CLEAR_WORD2				0x28cd0
+#define	CB_COLOR2_CLEAR_WORD2				0x28d0c
+#define	CB_COLOR3_CLEAR_WORD2				0x28d48
+#define	CB_COLOR4_CLEAR_WORD2				0x28d84
+#define	CB_COLOR5_CLEAR_WORD2				0x28dc0
+#define	CB_COLOR6_CLEAR_WORD2				0x28dfc
+#define	CB_COLOR7_CLEAR_WORD2				0x28e38
+
+#define	CB_COLOR1_CLEAR_WORD3				0x28cd4
+#define	CB_COLOR2_CLEAR_WORD3				0x28d10
+#define	CB_COLOR3_CLEAR_WORD3				0x28d4c
+#define	CB_COLOR4_CLEAR_WORD3				0x28d88
+#define	CB_COLOR5_CLEAR_WORD3				0x28dc4
+#define	CB_COLOR6_CLEAR_WORD3				0x28e00
+#define	CB_COLOR7_CLEAR_WORD3				0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0                         0x30000
+#	define TEX_DIM(x)				((x) << 0)
+#	define SQ_TEX_DIM_1D				0
+#	define SQ_TEX_DIM_2D				1
+#	define SQ_TEX_DIM_3D				2
+#	define SQ_TEX_DIM_CUBEMAP			3
+#	define SQ_TEX_DIM_1D_ARRAY			4
+#	define SQ_TEX_DIM_2D_ARRAY			5
+#	define SQ_TEX_DIM_2D_MSAA			6
+#	define SQ_TEX_DIM_2D_ARRAY_MSAA			7
+#define SQ_TEX_RESOURCE_WORD1_0                         0x30004
+#       define TEX_ARRAY_MODE(x)                        ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0                         0x30008
+#define SQ_TEX_RESOURCE_WORD3_0                         0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0                         0x30010
+#	define TEX_DST_SEL_X(x)				((x) << 16)
+#	define TEX_DST_SEL_Y(x)				((x) << 19)
+#	define TEX_DST_SEL_Z(x)				((x) << 22)
+#	define TEX_DST_SEL_W(x)				((x) << 25)
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
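+
+/*
+ * Illustrative sketch, not part of the upstream header: word 4
+ * swizzles are built one destination channel at a time, e.g.
+ * replicating X into RGB with alpha forced to one:
+ *
+ *	w4 = TEX_DST_SEL_X(SQ_SEL_X) | TEX_DST_SEL_Y(SQ_SEL_X) |
+ *	     TEX_DST_SEL_Z(SQ_SEL_X) | TEX_DST_SEL_W(SQ_SEL_1);
+ */
+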
+#define SQ_TEX_RESOURCE_WORD5_0                         0x30014
+#define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
+#define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define MACRO_TILE_ASPECT(x)                     (((x) & 0x3) << 6)
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
+#define R_030000_SQ_TEX_RESOURCE_WORD0_0             0x030000
+#define   S_030000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_030000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_030000_DIM                                 0xFFFFFFF8
+#define     V_030000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_030000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_030000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_030000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_030000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_030000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_030000_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 5)
+#define   G_030000_NON_DISP_TILING_ORDER(x)            (((x) >> 5) & 0x1)
+#define   C_030000_NON_DISP_TILING_ORDER               0xFFFFFFDF
+#define   S_030000_PITCH(x)                            (((x) & 0xFFF) << 6)
+#define   G_030000_PITCH(x)                            (((x) >> 6) & 0xFFF)
+#define   C_030000_PITCH                               0xFFFC003F
+#define   S_030000_TEX_WIDTH(x)                        (((x) & 0x3FFF) << 18)
+#define   G_030000_TEX_WIDTH(x)                        (((x) >> 18) & 0x3FFF)
+#define   C_030000_TEX_WIDTH                           0x0003FFFF
+#define R_030004_SQ_TEX_RESOURCE_WORD1_0             0x030004
+#define   S_030004_TEX_HEIGHT(x)                       (((x) & 0x3FFF) << 0)
+#define   G_030004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x3FFF)
+#define   C_030004_TEX_HEIGHT                          0xFFFFC000
+#define   S_030004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 14)
+#define   G_030004_TEX_DEPTH(x)                        (((x) >> 14) & 0x1FFF)
+#define   C_030004_TEX_DEPTH                           0xF8003FFF
+#define   S_030004_ARRAY_MODE(x)                       (((x) & 0xF) << 28)
+#define   G_030004_ARRAY_MODE(x)                       (((x) >> 28) & 0xF)
+#define   C_030004_ARRAY_MODE                          0x0FFFFFFF
+#define R_030008_SQ_TEX_RESOURCE_WORD2_0             0x030008
+#define   S_030008_BASE_ADDRESS(x)                     (((x) & 0xFFFFFFFF) << 0)
+#define   G_030008_BASE_ADDRESS(x)                     (((x) >> 0) & 0xFFFFFFFF)
+#define   C_030008_BASE_ADDRESS                        0x00000000
+#define R_03000C_SQ_TEX_RESOURCE_WORD3_0             0x03000C
+#define   S_03000C_MIP_ADDRESS(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_03000C_MIP_ADDRESS(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_03000C_MIP_ADDRESS                         0x00000000
+#define R_030010_SQ_TEX_RESOURCE_WORD4_0             0x030010
+#define   S_030010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_030010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_030010_FORMAT_COMP_X                       0xFFFFFFFC
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED           0x00000000
+#define     V_030010_SQ_FORMAT_COMP_SIGNED             0x00000001
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED    0x00000002
+#define   S_030010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_030010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_030010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_030010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_030010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_030010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_030010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_030010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_030010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_030010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_030010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_030010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define     V_030010_SQ_NUM_FORMAT_NORM                0x00000000
+#define     V_030010_SQ_NUM_FORMAT_INT                 0x00000001
+#define     V_030010_SQ_NUM_FORMAT_SCALED              0x00000002
+#define   S_030010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_030010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_030010_SRF_MODE_ALL                        0xFFFFFBFF
+#define     V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE     0x00000000
+#define     V_030010_SRF_MODE_NO_ZERO                  0x00000001
+#define   S_030010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_030010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_030010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_030010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_030010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_030010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_030010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_030010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_030010_DST_SEL_X                           0xFFF8FFFF
+#define     V_030010_SQ_SEL_X                          0x00000000
+#define     V_030010_SQ_SEL_Y                          0x00000001
+#define     V_030010_SQ_SEL_Z                          0x00000002
+#define     V_030010_SQ_SEL_W                          0x00000003
+#define     V_030010_SQ_SEL_0                          0x00000004
+#define     V_030010_SQ_SEL_1                          0x00000005
+#define   S_030010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_030010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_030010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_030010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_030010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_030010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_030010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_030010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_030010_DST_SEL_W                           0xF1FFFFFF
+#define   S_030010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_030010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_030010_BASE_LEVEL                          0x0FFFFFFF
+#define R_030014_SQ_TEX_RESOURCE_WORD5_0             0x030014
+#define   S_030014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_030014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_030014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_030014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_030014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_030014_BASE_ARRAY                          0xFFFE000F
+#define   S_030014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_030014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_030014_LAST_ARRAY                          0xC001FFFF
+#define R_030018_SQ_TEX_RESOURCE_WORD6_0             0x030018
+#define   S_030018_MAX_ANISO(x)                        (((x) & 0x7) << 0)
+#define   G_030018_MAX_ANISO(x)                        (((x) >> 0) & 0x7)
+#define   C_030018_MAX_ANISO                           0xFFFFFFF8
+#define   S_030018_PERF_MODULATION(x)                  (((x) & 0x7) << 3)
+#define   G_030018_PERF_MODULATION(x)                  (((x) >> 3) & 0x7)
+#define   C_030018_PERF_MODULATION                     0xFFFFFFC7
+#define   S_030018_INTERLACED(x)                       (((x) & 0x1) << 6)
+#define   G_030018_INTERLACED(x)                       (((x) >> 6) & 0x1)
+#define   C_030018_INTERLACED                          0xFFFFFFBF
+#define   S_030018_TILE_SPLIT(x)                       (((x) & 0x7) << 29)
+#define   G_030018_TILE_SPLIT(x)                       (((x) >> 29) & 0x7)
+#define R_03001C_SQ_TEX_RESOURCE_WORD7_0             0x03001C
+#define   S_03001C_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 6)
+#define   G_03001C_MACRO_TILE_ASPECT(x)                (((x) >> 6) & 0x3)
+#define   S_03001C_BANK_WIDTH(x)                       (((x) & 0x3) << 8)
+#define   G_03001C_BANK_WIDTH(x)                       (((x) >> 8) & 0x3)
+#define   S_03001C_BANK_HEIGHT(x)                      (((x) & 0x3) << 10)
+#define   G_03001C_BANK_HEIGHT(x)                      (((x) >> 10) & 0x3)
+#define   S_03001C_NUM_BANKS(x)                        (((x) & 0x3) << 16)
+#define   G_03001C_NUM_BANKS(x)                        (((x) >> 16) & 0x3)
+#define   S_03001C_TYPE(x)                             (((x) & 0x3) << 30)
+#define   G_03001C_TYPE(x)                             (((x) >> 30) & 0x3)
+#define   C_03001C_TYPE                                0x3FFFFFFF
+#define     V_03001C_SQ_TEX_VTX_INVALID_TEXTURE        0x00000000
+#define     V_03001C_SQ_TEX_VTX_INVALID_BUFFER         0x00000001
+#define     V_03001C_SQ_TEX_VTX_VALID_TEXTURE          0x00000002
+#define     V_03001C_SQ_TEX_VTX_VALID_BUFFER           0x00000003
+#define   S_03001C_DATA_FORMAT(x)                      (((x) & 0x3F) << 0)
+#define   G_03001C_DATA_FORMAT(x)                      (((x) >> 0) & 0x3F)
+#define   C_03001C_DATA_FORMAT                         0xFFFFFFC0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000C
+#	define SQ_VTCX_SEL_X(x)				((x) << 3)
+#	define SQ_VTCX_SEL_Y(x)				((x) << 6)
+#	define SQ_VTCX_SEL_Z(x)				((x) << 9)
+#	define SQ_VTCX_SEL_W(x)				((x) << 12)
+#define SQ_VTX_CONSTANT_WORD4_0				0x30010
+#define SQ_VTX_CONSTANT_WORD5_0                         0x30014
+#define SQ_VTX_CONSTANT_WORD6_0                         0x30018
+#define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
+
+#define TD_PS_BORDER_COLOR_INDEX                        0xA400
+#define TD_PS_BORDER_COLOR_RED                          0xA404
+#define TD_PS_BORDER_COLOR_GREEN                        0xA408
+#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
+#define TD_VS_BORDER_COLOR_INDEX                        0xA414
+#define TD_VS_BORDER_COLOR_RED                          0xA418
+#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
+#define TD_VS_BORDER_COLOR_BLUE                         0xA420
+#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
+#define TD_GS_BORDER_COLOR_INDEX                        0xA428
+#define TD_GS_BORDER_COLOR_RED                          0xA42C
+#define TD_GS_BORDER_COLOR_GREEN                        0xA430
+#define TD_GS_BORDER_COLOR_BLUE                         0xA434
+#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
+#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
+#define TD_HS_BORDER_COLOR_RED                          0xA440
+#define TD_HS_BORDER_COLOR_GREEN                        0xA444
+#define TD_HS_BORDER_COLOR_BLUE                         0xA448
+#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
+#define TD_LS_BORDER_COLOR_INDEX                        0xA450
+#define TD_LS_BORDER_COLOR_RED                          0xA454
+#define TD_LS_BORDER_COLOR_GREEN                        0xA458
+#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
+#define TD_CS_BORDER_COLOR_INDEX                        0xA464
+#define TD_CS_BORDER_COLOR_RED                          0xA468
+#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
+#define TD_CS_BORDER_COLOR_BLUE                         0xA470
+#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
+
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS			0x8E48
+#define CAYMAN_DB_EQAA					0x28804
+#define CAYMAN_DB_DEPTH_INFO				0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG				0x28BE0
+#define         CAYMAN_MSAA_NUM_SAMPLES_SHIFT           0
+#define         CAYMAN_MSAA_NUM_SAMPLES_MASK            0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE			0x28358
+/* cayman packet3 addition */
+#define	CAYMAN_PACKET3_DEALLOC_STATE			0x14
+
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/evergreend.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/ni.c
===================================================================
--- trunk/sys/dev/drm2/radeon/ni.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/ni.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2040 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/ni.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+#endif
+extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern void si_rlc_fini(struct radeon_device *rdev);
+extern int si_rlc_init(struct radeon_device *rdev);
+#endif
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
+#define ARUBA_RLC_UCODE_SIZE 1536
+
+#ifdef __linux__
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
+MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
+MODULE_FIRMWARE("radeon/ARUBA_me.bin");
+MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+#endif
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00916a00}
+};
+
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 mem_type, running, blackout = 0;
+	const u32 *io_mc_regs;
+	int i, ucode_size, regs_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		io_mc_regs = &barts_io_mc_regs[0][0];
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_TURKS:
+		io_mc_regs = &turks_io_mc_regs[0][0];
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAICOS:
+	default:
+		io_mc_regs = &caicos_io_mc_regs[0][0];
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAYMAN:
+		io_mc_regs = &cayman_io_mc_regs[0][0];
+		ucode_size = CAYMAN_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	}
+
+	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
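+		/*
+		 * Note: running is zero whenever this branch is taken, so the
+		 * blackout save/restore around the ucode load below is
+		 * effectively dead code; it is kept as-is to match the
+		 * upstream Linux driver.
+		 */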
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+				break;
+			udelay(1);
+		}
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		chip_name = "BARTS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_TURKS:
+		chip_name = "TURKS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_CAICOS:
+		chip_name = "CAICOS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_CAYMAN:
+		chip_name = "CAYMAN";
+		rlc_chip_name = "CAYMAN";
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_ARUBA:
+		chip_name = "ARUBA";
+		rlc_chip_name = "ARUBA";
+		/* pfp/me same size as CAYMAN */
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
+		mc_req_size = 0;
+		break;
+	default:
+		panic("%s: Unsupported family %d", __func__, rdev->family);
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+	err = 0;
+
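+	/*
+	 * On MidnightBSD/FreeBSD the images are fetched with firmware(9)
+	 * under radeonkmsfw_<chip>_<block> names rather than through the
+	 * Linux request_firmware() "radeon/*.bin" paths listed above.
+	 */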
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
+	rdev->pfp_fw = firmware_get(fw_name);
+	if (rdev->pfp_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->pfp_fw->datasize != pfp_req_size) {
+		DRM_ERROR(
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->datasize, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
+	rdev->me_fw = firmware_get(fw_name);
+	if (rdev->me_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->me_fw->datasize != me_req_size) {
+		DRM_ERROR(
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
+	rdev->rlc_fw = firmware_get(fw_name);
+	if (rdev->rlc_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->rlc_fw->datasize != rlc_req_size) {
+		DRM_ERROR(
+		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	/* no MC ucode on TN */
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", chip_name);
+		rdev->mc_fw = firmware_get(fw_name);
+		if (rdev->mc_fw == NULL) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (rdev->mc_fw->datasize != mc_req_size) {
+			DRM_ERROR(
+			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->datasize, fw_name);
+			err = -EINVAL;
+		}
+	}
+out:
+	if (err) {
+		if (err != -EINVAL)
+			DRM_ERROR(
+			       "ni_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		if (rdev->pfp_fw != NULL) {
+			firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+			rdev->pfp_fw = NULL;
+		}
+		if (rdev->me_fw != NULL) {
+			firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+			rdev->me_fw = NULL;
+		}
+		if (rdev->rlc_fw != NULL) {
+			firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+			rdev->rlc_fw = NULL;
+		}
+		if (rdev->mc_fw != NULL) {
+			firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD);
+			rdev->mc_fw = NULL;
+		}
+	}
+	return err;
+}
+
+/**
+ * ni_fini_microcode - drop the firmware image references
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Drop the pfp, me, mc and rlc firmware image references.
+ * Called at driver shutdown.
+ */
+void ni_fini_microcode(struct radeon_device *rdev)
+{
+
+	if (rdev->pfp_fw != NULL) {
+		firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+		rdev->pfp_fw = NULL;
+	}
+
+	if (rdev->me_fw != NULL) {
+		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+		rdev->me_fw = NULL;
+	}
+
+	if (rdev->rlc_fw != NULL) {
+		firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+		rdev->rlc_fw = NULL;
+	}
+
+	if (rdev->mc_fw != NULL) {
+		firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD);
+		rdev->mc_fw = NULL;
+	}
+}
+
+
+/*
+ * Core functions
+ */
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 cgts_tcc_disable;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 cgts_sm_ctrl_reg;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	u32 disabled_rb_mask;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_CAYMAN:
+		rdev->config.cayman.max_shader_engines = 2;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 8;
+		rdev->config.cayman.max_simds_per_se = 12;
+		rdev->config.cayman.max_backends_per_se = 4;
+		rdev->config.cayman.max_texture_channel_caches = 8;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x100;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_ARUBA:
+	default:
+		rdev->config.cayman.max_shader_engines = 1;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 2;
+		if ((rdev->ddev->pci_device == 0x9900) ||
+		    (rdev->ddev->pci_device == 0x9901) ||
+		    (rdev->ddev->pci_device == 0x9905) ||
+		    (rdev->ddev->pci_device == 0x9906) ||
+		    (rdev->ddev->pci_device == 0x9907) ||
+		    (rdev->ddev->pci_device == 0x9908) ||
+		    (rdev->ddev->pci_device == 0x9909) ||
+		    (rdev->ddev->pci_device == 0x990B) ||
+		    (rdev->ddev->pci_device == 0x990C) ||
+		    (rdev->ddev->pci_device == 0x990F) ||
+		    (rdev->ddev->pci_device == 0x9910) ||
+		    (rdev->ddev->pci_device == 0x9917) ||
+		    (rdev->ddev->pci_device == 0x9999) ||
+		    (rdev->ddev->pci_device == 0x999C)) {
+			rdev->config.cayman.max_simds_per_se = 6;
+			rdev->config.cayman.max_backends_per_se = 2;
+		} else if ((rdev->ddev->pci_device == 0x9903) ||
+			   (rdev->ddev->pci_device == 0x9904) ||
+			   (rdev->ddev->pci_device == 0x990A) ||
+			   (rdev->ddev->pci_device == 0x990D) ||
+			   (rdev->ddev->pci_device == 0x990E) ||
+			   (rdev->ddev->pci_device == 0x9913) ||
+			   (rdev->ddev->pci_device == 0x9918) ||
+			   (rdev->ddev->pci_device == 0x999D)) {
+			rdev->config.cayman.max_simds_per_se = 4;
+			rdev->config.cayman.max_backends_per_se = 2;
+		} else if ((rdev->ddev->pci_device == 0x9919) ||
+			   (rdev->ddev->pci_device == 0x9990) ||
+			   (rdev->ddev->pci_device == 0x9991) ||
+			   (rdev->ddev->pci_device == 0x9994) ||
+			   (rdev->ddev->pci_device == 0x9995) ||
+			   (rdev->ddev->pci_device == 0x9996) ||
+			   (rdev->ddev->pci_device == 0x999A) ||
+			   (rdev->ddev->pci_device == 0x99A0)) {
+			rdev->config.cayman.max_simds_per_se = 3;
+			rdev->config.cayman.max_backends_per_se = 1;
+		} else {
+			rdev->config.cayman.max_simds_per_se = 2;
+			rdev->config.cayman.max_backends_per_se = 1;
+		}
+		rdev->config.cayman.max_texture_channel_caches = 2;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x40;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.cayman.mem_row_size_in_kb > 4)
+		rdev->config.cayman.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.cayman.shader_engine_tile_size = 32;
+	rdev->config.cayman.num_gpus = 1;
+	rdev->config.cayman.multi_gpu_tile_size = 64;
+
+	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+	rdev->config.cayman.num_tile_pipes = (1 << tmp);
+	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+	rdev->config.cayman.num_shader_engines = tmp + 1;
+	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+	rdev->config.cayman.num_gpus = tmp + 1;
+	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.cayman.tile_config = 0;
+	switch (rdev->config.cayman.num_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.cayman.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.cayman.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.cayman.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.cayman.tile_config |= (3 << 0);
+		break;
+	}
+
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.cayman.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.cayman.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.cayman.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.cayman.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
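+	/*
+	 * Illustrative encoding (not driver logic): 8 tile pipes and eight
+	 * banks on a dGPU yield tile_config = (3 << 0) | (1 << 4), with the
+	 * pipe interleave and row size fields from gb_addr_config folded
+	 * into bits 11:8 and 15:12 above.
+	 */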
+
+	tmp = 0;
+	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+		u32 rb_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+		tmp <<= 4;
+		tmp |= rb_disable_bitmap;
+	}
+	/* the enabled RBs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	if (ASIC_IS_DCE6(rdev))
+		WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+
+	if ((rdev->config.cayman.max_backends_per_se == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp,
+						rdev->config.cayman.max_backends_per_se *
+						rdev->config.cayman.max_shader_engines,
+						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	cgts_tcc_disable = 0xffff0000;
+	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+		cgts_tcc_disable &= ~(1 << (16 + i));
+	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+	/* reprogram the shader complex */
+	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+	for (i = 0; i < 16; i++)
+		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+	/* need to be explicitly zero-ed */
+	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+	WREG32(SQ_LSTMP_RING_BASE, 0);
+	WREG32(SQ_HSTMP_RING_BASE, 0);
+	WREG32(SQ_ESTMP_RING_BASE, 0);
+	WREG32(SQ_GSTMP_RING_BASE, 0);
+	WREG32(SQ_VSTMP_RING_BASE, 0);
+	WREG32(SQ_PSTMP_RING_BASE, 0);
+
+	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+	WREG32(SQ_CONFIG, (VC_ENABLE |
+			   EXPORT_SRC_C |
+			   GFX_PRIO(0) |
+			   CS1_PRIO(0) |
+			   CS2_PRIO(1)));
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int i, r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-7 */
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and set up and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 8; i++) {
+		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			rdev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-7 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	cayman_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+			      int ring, u32 cp_int_cntl)
+{
+	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
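+	/* the low two bits of SRBM_GFX_CNTL select which of the three CP
+	 * ring instances the CP_INT_CNTL access below targets */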
+	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
+/*
+ * CP.
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	if (ring->rptr_save_reg) {
+		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg - 
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw | 
+			  (ib->vm ? (ib->vm->id << 24) : 0));
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
+
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	}
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	cayman_cp_enable(rdev, false);
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
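+	/* 7 dwords: the ME_INITIALIZE packet3 header plus six payload dwords */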
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	cayman_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < cayman_default_size; i++)
+		radeon_ring_write(ring, cayman_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	/* XXX init other rings */
+
+	return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	cayman_cp_enable(rdev, false);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int cayman_cp_resume(struct radeon_device *rdev)
+{
+	static const int ridx[] = {
+		RADEON_RING_TYPE_GFX_INDEX,
+		CAYMAN_RING_TYPE_CP1_INDEX,
+		CAYMAN_RING_TYPE_CP2_INDEX
+	};
+	static const unsigned cp_rb_cntl[] = {
+		CP_RB0_CNTL,
+		CP_RB1_CNTL,
+		CP_RB2_CNTL,
+	};
+	static const unsigned cp_rb_rptr_addr[] = {
+		CP_RB0_RPTR_ADDR,
+		CP_RB1_RPTR_ADDR,
+		CP_RB2_RPTR_ADDR
+	};
+	static const unsigned cp_rb_rptr_addr_hi[] = {
+		CP_RB0_RPTR_ADDR_HI,
+		CP_RB1_RPTR_ADDR_HI,
+		CP_RB2_RPTR_ADDR_HI
+	};
+	static const unsigned cp_rb_base[] = {
+		CP_RB0_BASE,
+		CP_RB1_BASE,
+		CP_RB2_BASE
+	};
+	struct radeon_ring *ring;
+	int i, r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, (1 << 27));
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+	WREG32(SCRATCH_UMSK, 0xff);
+
+	for (i = 0; i < 3; ++i) {
+		uint32_t rb_cntl;
+		uint64_t addr;
+
+		/* Set ring buffer size */
+		ring = &rdev->ring[ridx[i]];
+		rb_cntl = drm_order(ring->ring_size / 8);
+		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= BUF_SWAP_32BIT;
+#endif
+		WREG32(cp_rb_cntl[i], rb_cntl);
+
+		/* set the wb address whether it's enabled or not */
+		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+	}
+
+	/* set the rb base addr, this causes an internal reset of ALL rings */
+	for (i = 0; i < 3; ++i) {
+		ring = &rdev->ring[ridx[i]];
+		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+	}
+
+	for (i = 0; i < 3; ++i) {
+		/* Initialize the ring buffer's read and write pointers */
+		ring = &rdev->ring[ridx[i]];
+		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
+
+		ring->rptr = ring->wptr = 0;
+		WREG32(ring->rptr_reg, ring->rptr);
+		WREG32(ring->wptr_reg, ring->wptr);
+
+		mdelay(1);
+		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+	}
+
+	/* start the rings */
+	cayman_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	/* this only tests cp0 */
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
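+/*
+ * Illustrative packet layout (a sketch based on the DMA_PACKET() helper
+ * used below): each DMA ring packet starts with a header dword encoding
+ * the command, tiling/swap bits and a dword count.  The write-back in
+ * cayman_dma_ring_ib_execute(), for example, is emitted as
+ *
+ *   DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)   header: write one dword
+ *   addr_lo, addr_hi                        destination address
+ *   next_rptr                               payload
+ */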
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
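+		/*
+		 * Precompute where the read pointer will land once this IB
+		 * packet is consumed: the 4-dword write packet below, the
+		 * NOP padding up to the 5 (mod 8) start required for the
+		 * IB packet, and the 3-dword IB packet itself.
+		 */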
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA
+	 * ring; since the IB packet itself is 3 DW long, it has to start at
+	 * an offset of 5 (mod 8).  Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl;
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	/* dma0 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+	/* dma1 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	u32 reg_offset, wb_offset;
+	int i, r;
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0) {
+			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+			reg_offset = DMA0_REGISTER_OFFSET;
+			wb_offset = R600_WB_DMA_RPTR_OFFSET;
+		} else {
+			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+			reg_offset = DMA1_REGISTER_OFFSET;
+			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+		}
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = drm_order(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
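+		/* i.e. DMA_RB_SIZE(rb_bufsz): the log2 ring size field */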
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + reg_offset, 0);
+		WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		if (rdev->wb.enabled)
+			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+		dma_cntl = RREG32(DMA_CNTL + reg_offset);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = radeon_ring_test(rdev, ring->idx, ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+	cayman_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+{
+	u32 grbm_reset = 0;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return;
+
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	/* reset all the gfx blocks */
+	grbm_reset = (SOFT_RESET_CP |
+		      SOFT_RESET_CB |
+		      SOFT_RESET_DB |
+		      SOFT_RESET_GDS |
+		      SOFT_RESET_PA |
+		      SOFT_RESET_SC |
+		      SOFT_RESET_SPI |
+		      SOFT_RESET_SH |
+		      SOFT_RESET_SX |
+		      SOFT_RESET_TC |
+		      SOFT_RESET_TA |
+		      SOFT_RESET_VGT |
+		      SOFT_RESET_IA);
+
+	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+	WREG32(GRBM_SOFT_RESET, grbm_reset);
+	(void)RREG32(GRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(GRBM_SOFT_RESET, 0);
+	(void)RREG32(GRBM_SOFT_RESET);
+
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* dma0 */
+	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+	/* dma1 */
+	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		reset_mask &= ~RADEON_RESET_DMA;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14F8));
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14D8));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14FC));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14DC));
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		cayman_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		cayman_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	return 0;
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+	return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					    RADEON_RESET_COMPUTE |
+					    RADEON_RESET_DMA));
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	else
+		dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	evergreen_mc_program(rdev);
+	r = cayman_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	cayman_gpu_init(rdev);
+
+	r = evergreen_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		r = si_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     CP_RB0_RPTR, CP_RB0_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = cayman_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = cayman_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; unlike on r500 hardware,
+	 * posting on rv770 and newer performs the tasks necessary to
+	 * bring the GPU back into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		DRM_ERROR("cayman startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+	return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	cayman_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	cayman_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call ASIC-specific functions. This should also allow
+ * removing a number of callback functions such as vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			si_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cayman_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 *
+	 * We can skip this check for TN, because there is no MC
+	 * ucode.
+	 */
+	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+	r600_blit_fini(rdev);
+	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		si_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	ni_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 8;
+	/* base offset of vram pages */
+	if (rdev->flags & RADEON_IS_IGP) {
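+		/* FUS_MC_VM_FB_OFFSET is interpreted in 4MB units, hence
+		 * the shift left by 22 */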
+		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		rdev->vm_manager.vram_base_offset = tmp;
+	} else
+		rdev->vm_manager.vram_base_offset = 0;
+	return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+#define R600_ENTRY_VALID   (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
+{
+	uint32_t r600_flags = 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		r600_flags |= R600_PTE_SYSTEM;
+		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return r600_flags;
+}
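+
+/* Worked example: a valid, readable and writeable mapping yields
+ *   R600_ENTRY_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE
+ *   = (1 << 0) | (1 << 5) | (1 << 6) = 0x61
+ */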
+
+/**
+ * cayman_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags)
+{
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
+
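+	/* Two encodings are used below: on the GFX (CP) ring the update is
+	 * a PM4 ME_WRITE packet whose count field is 14 bits wide (hence
+	 * the clamp to 0x3FFF), while on a DMA ring it is a DMA_PACKET_WRITE
+	 * whose count field is 20 bits wide (hence the clamp to 0xFFFFE,
+	 * kept even because each entry takes two dwords).
+	 */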
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 1 + count * 2;
+			if (ndw > 0x3FFF)
+				ndw = 0x3FFF;
+
+			radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
+			}
+		}
+	} else {
+		while (count) {
+			ndw = count * 2;
+			if (ndw > 0xFFFFE)
+				ndw = 0xFFFFE;
+
+			/* for non-physically contiguous pages (system) */
+			radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
+			}
+		}
+	}
+}
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
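+	/* The async DMA ring has no PM4 type-0 packets, so register writes
+	 * go through SRBM_WRITE packets; in the first payload dword, bits
+	 * 0-15 carry the dword-aligned register offset (reg >> 2) and bits
+	 * 16-19 are likely the byte-enable mask (0xf = all four bytes). */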
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/ni.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/ni_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/ni_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/ni_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,90 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/ni_reg.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL                         0x6840
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x68c4
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL                           0x68d4
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x68f0
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x6960
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x6964
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x6a80
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/ni_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/nid.h
===================================================================
--- trunk/sys/dev/drm2/radeon/nid.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/nid.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,685 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/nid.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#define CAYMAN_MAX_SH_GPRS           256
+#define CAYMAN_MAX_TEMP_GPRS         16
+#define CAYMAN_MAX_SH_THREADS        256
+#define CAYMAN_MAX_SH_STACK_ENTRIES  4096
+#define CAYMAN_MAX_FRC_EOV_CNT       16384
+#define CAYMAN_MAX_BACKENDS          8
+#define CAYMAN_MAX_BACKENDS_MASK     0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS             16
+#define CAYMAN_MAX_SIMDS_MASK        0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES             8
+#define CAYMAN_MAX_PIPES_MASK        0xFF
+#define CAYMAN_MAX_LDS_NUM           0xFFFF
+#define CAYMAN_MAX_TCC               16
+#define CAYMAN_MAX_TCC_MASK          0xFF
+
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	SRBM_GFX_CNTL				        0x0E44
+#define		RINGID(x)					(((x) & 0x3) << 0)
+#define		VMID(x)						(((x) & 0x7) << 0)
+#define	SRBM_STATUS				        0x0E50
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+#define	FUS_MC_VM_FB_OFFSET				0x2068
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+#define MC_SEQ_MISC0           				0x2a00
+#define		MC_SEQ_MISC0_GDDR5_SHIFT      		28
+#define		MC_SEQ_MISC0_GDDR5_MASK      		0xf0000000
+#define		MC_SEQ_MISC0_GDDR5_VALUE      		5
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3F8C
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+
+#define RLC_GFX_INDEX           			0x3FC4
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1U << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1U << 31)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1U << 31)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_COHER_CNTL2					0x85E8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT				8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0xFFFF0000
+#define		INACTIVE_SIMDS_SHIFT				16
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define VGT_TF_RING_SIZE				0x8988
+#define VGT_OFFCHIP_LDS_BASE				0x89b4
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		GFX_PRIO(x)					((x) << 2)
+#define		CS1_PRIO(x)					((x) << 4)
+#define		CS2_PRIO(x)					((x) << 6)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define		DYN_GPR_ENABLE					(1 << 8)
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define		CRC_SIMD_ID_WADDR_DISABLE			(1 << 8)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x9150
+#define		OVERRIDE				(1 << 21)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
+#define		NUM_LOWER_PIPES(x)			((x) << 30)
+#define		NUM_LOWER_PIPES_MASK			0x40000000
+#define		NUM_LOWER_PIPES_SHIFT			30
+#define GB_BACKEND_MAP  				0x98FC
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1U << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define CP_INT_CNTL                                     0xC124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_DEBUG					0xC1FC
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
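+
+/* Worked examples, using the register and opcode definitions in this
+ * header:
+ *   PACKET0(VM_INVALIDATE_REQUEST, 0)
+ *       = (0 << 30) | ((0x1478 >> 2) & 0xFFFF) | (0 << 16) = 0x0000051E
+ *   PACKET3(PACKET3_PFP_SYNC_ME, 0)
+ *       = (3 << 30) | (0x42 << 8) | (0 << 16) = 0xC0004200
+ * CP_PACKET_GET_TYPE() and CP_PACKET3_GET_OPCODE() recover the type and
+ * opcode fields from such headers.
+ */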
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DEALLOC_STATE				0x14
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE		        0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE		        0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT			        0x75
+#define	PACKET3_ME_WRITE				0x7A
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1U << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG  				  0xd0b8
+#define DMA_MODE                                          0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
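+
+/* Worked example: the SRBM write header used by cayman_dma_vm_flush()
+ * expands to
+ *   DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0) = (0x9 << 28) = 0x90000000
+ */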
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/nid.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r100.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r100.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r100.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,4241 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r100.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
+#include "atom.h"
+
+#include "r100_reg_safe.h"
+#include "rn50_reg_safe.h"
+
+/* Firmware Names */
+#define FIRMWARE_R100		"radeonkmsfw_R100_cp"
+#define FIRMWARE_R200		"radeonkmsfw_R200_cp"
+#define FIRMWARE_R300		"radeonkmsfw_R300_cp"
+#define FIRMWARE_R420		"radeonkmsfw_R420_cp"
+#define FIRMWARE_RS690		"radeonkmsfw_RS690_cp"
+#define FIRMWARE_RS600		"radeonkmsfw_RS600_cp"
+#define FIRMWARE_R520		"radeonkmsfw_R520_cp"
+
+#ifdef __linux__
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+#endif
+
+#include "r100_track.h"
+
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * and, in some cases, others.
+ */
+
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return !!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR);
+	else
+		return !!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR);
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 vline1, vline2;
+
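+	/* sample the current scanline twice; if the two reads differ, the
+	 * vertical counter is still advancing */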
+	if (crtc == 0) {
+		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	} else {
+		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	}
+	return vline1 != vline2;
+}
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
+void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (crtc == 0) {
+		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+			return;
+	} else {
+		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+			return;
+	}
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
+ */
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; when it does, we release the lock and allow the
+ * double-buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	int i;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	switch (rdev->pm.dynpm_planned_action) {
+	case DYNPM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state_index = 0;
+		rdev->pm.dynpm_can_downclock = false;
+		break;
+	case DYNPM_ACTION_DOWNCLOCK:
+		if (rdev->pm.current_power_state_index == 0) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_downclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = 0; i < rdev->pm.num_power_states; i++) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i >= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index - 1;
+		}
+		/* don't use the power state if crtcs are active and no display flag is set */
+		if ((rdev->pm.active_crtc_count > 0) &&
+		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+		     RADEON_PM_MODE_NO_DISPLAY)) {
+			rdev->pm.requested_power_state_index++;
+		}
+		break;
+	case DYNPM_ACTION_UPCLOCK:
+		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_upclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i <= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index + 1;
+		}
+		break;
+	case DYNPM_ACTION_DEFAULT:
+		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.dynpm_can_upclock = false;
+		break;
+	case DYNPM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for undefined action\n");
+		return;
+	}
+	/* only one clock mode per power state */
+	rdev->pm.requested_clock_mode_index = 0;
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
+void r100_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	sclk_cntl = RREG32_PLL(SCLK_CNTL);
+	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+		else
+			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+	} else
+		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+			switch (voltage->delay) {
+			case 33:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+				break;
+			case 66:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+				break;
+			case 99:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+				break;
+			case 132:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+				break;
+			}
+		} else
+			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		sclk_cntl &= ~FORCE_HDP;
+	else
+		sclk_cntl |= FORCE_HDP;
+
+	WREG32_PLL(SCLK_CNTL, sclk_cntl);
+	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
+void r100_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+	return !(RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE);
+}
+
+/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r100_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(RADEON_FP_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP_DETECT_INT_POL;
+		WREG32(RADEON_FP_GEN_CNTL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(RADEON_FP2_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP2_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP2_DETECT_INT_POL;
+		WREG32(RADEON_FP2_GEN_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
+void r100_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* TODO: can we do something here? */
+	/* The hw seems to cache only one entry, so we should discard
+	 * that entry; otherwise the first GPU GART read hitting it
+	 * could end up at the wrong address. */
+}
+
+int r100_pci_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		DRM_ERROR("R100 PCI GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	radeon_gart_restore(rdev);
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	/* set address range for PCI address translate */
+	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
+	/* set PCI GART page-table base address */
+	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	r100_pci_gart_tlb_flush(rdev);
+	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	WREG32(RADEON_AIC_LO_ADDR, 0);
+	WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	return 0;
+}
+
+void r100_pci_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r100_pci_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+
+	if (!rdev->irq.installed) {
+		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= RADEON_SW_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		tmp |= RADEON_CRTC_VBLANK_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		tmp |= RADEON_CRTC2_VBLANK_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		tmp |= RADEON_FP_DETECT_MASK;
+	}
+	if (rdev->irq.hpd[1]) {
+		tmp |= RADEON_FP2_DETECT_MASK;
+	}
+	WREG32(RADEON_GEN_INT_CNTL, tmp);
+	return 0;
+}
+
+void r100_irq_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	tmp = RREG32(R_000044_GEN_INT_STATUS);
+	WREG32(R_000044_GEN_INT_STATUS, tmp);
+}
+
+static uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+	uint32_t irq_mask = RADEON_SW_INT_TEST |
+		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+
+	if (irqs) {
+		WREG32(RADEON_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+irqreturn_t r100_irq_process(struct radeon_device *rdev)
+{
+	uint32_t status, msi_rearm;
+	bool queue_hotplug = false;
+
+	status = r100_irq_ack(rdev);
+	if (!status) {
+		return IRQ_NONE;
+	}
+	if (rdev->shutdown) {
+		return IRQ_NONE;
+	}
+	while (status) {
+		/* SW interrupt */
+		if (status & RADEON_SW_INT_TEST) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (status & RADEON_CRTC_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				DRM_WAKEUP(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_flip(rdev, 0);
+		}
+		if (status & RADEON_CRTC2_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				DRM_WAKEUP(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_flip(rdev, 1);
+		}
+		if (status & RADEON_FP_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (status & RADEON_FP2_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		status = r100_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
+	if (rdev->msi_enabled) {
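+		/* Re-arm the MSI so the next interrupt can fire: RS400/RS480
+		 * toggle a bit in AIC_CNTL, newer asics have a dedicated
+		 * rearm register. */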
+		switch (rdev->family) {
+		case CHIP_RS400:
+		case CHIP_RS480:
+			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+			WREG32(RADEON_AIC_CNTL, msi_rearm);
+			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(RADEON_CRTC_CRNT_FRAME);
+	else
+		return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib scheduling and buffer moves) */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* We have to make sure that caches are flushed before
+	 * CPU might read something from VRAM. */
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	/* Unused on older asics, since we don't have semaphores or multiple rings */
+	panic("%s: Unused on older asics", __func__);
+}
+
+int r100_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t cur_pages;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
+	uint32_t pitch;
+	uint32_t stride_pixels;
+	unsigned ndw;
+	int num_loops;
+	int r = 0;
+
+	/* radeon limited to 16k stride */
+	stride_bytes &= 0x3fff;
+	/* radeon pitch is /64 */
+	pitch = stride_bytes / 64;
+	stride_pixels = stride_bytes / 4;
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
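+	/* with 4096-byte GPU pages this gives pitch = 4096 / 64 = 64 and
+	 * stride_pixels = 1024; the blit moves up to 8191 pages per loop */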
+
+	/* Ask for enough room for blit + flush + fence */
+	ndw = 64 + (10 * num_loops);
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+		return -EINVAL;
+	}
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
+		if (cur_pages > 8191) {
+			cur_pages = 8191;
+		}
+		num_gpu_pages -= cur_pages;
+
+		/* pages run in the Y direction (height),
+		   page width in the X direction (width) */
+		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+		radeon_ring_write(ring,
+				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_SRC_CLIPPING |
+				  RADEON_GMC_DST_CLIPPING |
+				  RADEON_GMC_BRUSH_NONE |
+				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+				  RADEON_GMC_SRC_DATATYPE_COLOR |
+				  RADEON_ROP3_S |
+				  RADEON_DP_SRC_SOURCE_MEMORY |
+				  RADEON_GMC_CLR_CMP_CNTL_DIS |
+				  RADEON_GMC_WR_MSK_DIS);
+		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_HOST_IDLECLEAN |
+			  RADEON_WAIT_DMA_GUI_IDLE);
+	if (fence) {
+		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return r;
+}
+
+static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(R_000E40_RBBM_STATUS);
+		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
+			return 0;
+		}
+		udelay(1);
+	}
+	return -1;
+}
+
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+
+/* Load the microcode for the CP */
+static int r100_cp_init_microcode(struct radeon_device *rdev)
+{
+	const char *fw_name = NULL;
+	int err;
+
+	DRM_DEBUG_KMS("\n");
+
+	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		fw_name = FIRMWARE_R100;
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family == CHIP_RV250) ||
+		   (rdev->family == CHIP_RV280) ||
+		   (rdev->family == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		fw_name = FIRMWARE_R200;
+	} else if ((rdev->family == CHIP_R300) ||
+		   (rdev->family == CHIP_R350) ||
+		   (rdev->family == CHIP_RV350) ||
+		   (rdev->family == CHIP_RV380) ||
+		   (rdev->family == CHIP_RS400) ||
+		   (rdev->family == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		fw_name = FIRMWARE_R300;
+	} else if ((rdev->family == CHIP_R420) ||
+		   (rdev->family == CHIP_R423) ||
+		   (rdev->family == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		fw_name = FIRMWARE_R420;
+	} else if ((rdev->family == CHIP_RS690) ||
+		   (rdev->family == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
+		fw_name = FIRMWARE_RS690;
+	} else if (rdev->family == CHIP_RS600) {
+		DRM_INFO("Loading RS600 Microcode\n");
+		fw_name = FIRMWARE_RS600;
+	} else if ((rdev->family == CHIP_RV515) ||
+		   (rdev->family == CHIP_R520) ||
+		   (rdev->family == CHIP_RV530) ||
+		   (rdev->family == CHIP_R580) ||
+		   (rdev->family == CHIP_RV560) ||
+		   (rdev->family == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		fw_name = FIRMWARE_R520;
+	}
+
+	if (fw_name == NULL) {
+		DRM_ERROR("radeon_cp: No microcode for family %d\n",
+			  rdev->family);
+		return -EINVAL;
+	}
+	err = 0;
+	rdev->me_fw = firmware_get(fw_name);
+	if (rdev->me_fw == NULL) {
+		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		err = -ENOENT;
+	} else if (rdev->me_fw->datasize % 8) {
+		DRM_ERROR(
+		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->datasize, fw_name);
+		err = -EINVAL;
+		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+		rdev->me_fw = NULL;
+	}
+	return err;
+}
+
+/**
+ * r100_cp_fini_microcode - drop the firmware image reference
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Drop the me firmware image reference.
+ * Called at driver shutdown.
+ */
+static void r100_cp_fini_microcode(struct radeon_device *rdev)
+{
+
+	if (rdev->me_fw != NULL) {
+		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+		rdev->me_fw = NULL;
+	}
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i, size;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->me_fw) {
+		size = rdev->me_fw->datasize / 4;
+		fw_data = (const __be32 *)rdev->me_fw->data;
+		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
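+		/* the microcode is a stream of 64-bit words; feed each one
+		 * as a high/low pair of 32-bit register writes */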
+		for (i = 0; i < size; i += 2) {
+			WREG32(RADEON_CP_ME_RAM_DATAH,
+			       be32_to_cpup(&fw_data[i]));
+			WREG32(RADEON_CP_ME_RAM_DATAL,
+			       be32_to_cpup(&fw_data[i + 1]));
+		}
+	}
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	unsigned rb_bufsz;
+	unsigned rb_blksz;
+	unsigned max_fetch;
+	unsigned pre_write_timer;
+	unsigned pre_write_limit;
+	unsigned indirect2_start;
+	unsigned indirect1_start;
+	uint32_t tmp;
+	int r;
+
+	if (r100_debugfs_cp_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for CP !\n");
+	}
+	if (!rdev->me_fw) {
+		r = r100_cp_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
+	r100_cp_load_microcode(rdev);
+	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+			     0, 0x7fffff, RADEON_CP_PACKET2);
+	if (r) {
+		return r;
+	}
+	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
+	 * the rptr copy in system ram */
+	rb_blksz = 9;
+	/* cp will read 128 bytes at a time (4 dwords) */
+	max_fetch = 1;
+	ring->align_mask = 16 - 1;
+	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+	pre_write_timer = 64;
+	/* Force a CP_RB_WPTR write if it is written more than once before
+	 * the delay expires
+	 */
+	pre_write_limit = 0;
+	/* Setup the cp cache like this (cache size is 96 dwords) :
+	 *	RING		0  to 15
+	 *	INDIRECT1	16 to 79
+	 *	INDIRECT2	80 to 95
+	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
+	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
+	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
+	 * The idea is that most gpu commands go through the indirect1 buffer,
+	 * so it gets the bigger cache.
+	 */
+	indirect2_start = 80;
+	indirect1_start = 16;
+	/* cp setup */
+	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+	       REG_SET(RADEON_MAX_FETCH, max_fetch));
+#ifdef __BIG_ENDIAN
+	tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
+
+	/* Set ring address */
+	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+	/* Force read & write ptr to 0 */
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(R_00070C_CP_RB_RPTR_ADDR,
+		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+	if (rdev->wb.enabled)
+		WREG32(R_000770_SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RADEON_RB_NO_UPDATE;
+		WREG32(R_000770_SCRATCH_UMSK, 0);
+	}
+
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	udelay(10);
+	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+	/* Set cp mode to bus mastering & enable cp*/
+	WREG32(RADEON_CP_CSQ_MODE,
+	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
+	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+	/* at this point everything should be setup correctly to enable master */
+	pci_enable_busmaster(rdev->dev);
+
+	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+		return r;
+	}
+	ring->ready = true;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	if (!ring->rptr_save_reg /* not resuming from suspend */
+	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+	return 0;
+}
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+	if (r100_cp_wait_for_idle(rdev)) {
+		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
+	}
+	/* Disable ring */
+	r100_cp_disable(rdev);
+	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+	/* Disable ring */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(RADEON_CP_CSQ_MODE, 0);
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	WREG32(R_000770_SCRATCH_UMSK, 0);
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+/*
+ * CS functions
+ */
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg)
+{
+	int r;
+	u32 tile_flags = 0;
+	u32 tmp;
+	struct radeon_cs_reloc *reloc;
+	u32 value;
+
+	r = r100_cs_packet_next_reloc(p, &reloc);
+	if (r) {
+		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+			  idx, reg);
+		r100_cs_dump_packet(p, pkt);
+		return r;
+	}
+
+	value = radeon_get_ib_value(p, idx);
+	tmp = value & 0x003fffff;
+	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= RADEON_DST_TILE_MACRO;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+			if (reg == RADEON_SRC_PITCH_OFFSET) {
+				DRM_ERROR("Cannot src blit from microtiled surface\n");
+				r100_cs_dump_packet(p, pkt);
+				return -EINVAL;
+			}
+			tile_flags |= RADEON_DST_TILE_MICRO;
+		}
+
+		tmp |= tile_flags;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+	} else
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+	return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx)
+{
+	unsigned c, i;
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	int r = 0;
+	volatile uint32_t *ib;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	c = radeon_get_ib_value(p, idx++) & 0x1F;
+	if (c > 16) {
+	    DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+		      pkt->opcode);
+	    r100_cs_dump_packet(p, pkt);
+	    return -EINVAL;
+	}
+	track->num_arrays = c;
+	for (i = 0; i < (c - 1); i+=2, idx+=3) {
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize &= 0x7F;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+		track->arrays[i + 1].robj = reloc->robj;
+		track->arrays[i + 1].esize = idx_value >> 24;
+		track->arrays[i + 1].esize &= 0x7F;
+	}
+	if (c & 1) {
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+					  pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].esize &= 0x7F;
+	}
+	return r;
+}
+
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check)
+{
+	unsigned reg;
+	unsigned i, j, m;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	/* Check that the register falls into the register range
+	 * determined by the number of entries (n) in the
+	 * safe register bitmap.
+	 */
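+	/* Each 32-bit word of auth[] covers 32 registers: registers are
+	 * 4-byte aligned, so (reg >> 2) is the register index, reg >> 7
+	 * selects the bitmap word and (reg >> 2) & 31 the bit within it. */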
+	if (pkt->one_reg_wr) {
+		if ((reg >> 7) > n) {
+			return -EINVAL;
+		}
+	} else {
+		if (((reg + (pkt->count << 2)) >> 7) > n) {
+			return -EINVAL;
+		}
+	}
+	for (i = 0; i <= pkt->count; i++, idx++) {
+		j = (reg >> 7);
+		m = 1 << ((reg >> 2) & 31);
+		if (auth[j] & m) {
+			r = check(p, pkt, idx, reg);
+			if (r) {
+				return r;
+			}
+		}
+		if (pkt->one_reg_wr) {
+			if (!(auth[j] & m)) {
+				break;
+			}
+		} else {
+			reg += 4;
+		}
+	}
+	return 0;
+}
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt)
+{
+	volatile uint32_t *ib;
+	unsigned i;
+	unsigned idx;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx;
+	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
+		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+	}
+}
+
+/**
+ * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p:		parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ * @idx:	index in the ib chunk of the packet to parse
+ *
+ * Assume that chunk_ib_index is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size, or if the packet
+ * type is unknown.
+ **/
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	uint32_t header;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	header = radeon_get_ib_value(p, idx);
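+	/* the header dword packs the packet type and dword count, plus the
+	 * start register and one-reg-write flag for PACKET0 or the opcode
+	 * for PACKET3; the CP_PACKET*_GET_* macros extract each field */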
+	pkt->idx = idx;
+	pkt->type = CP_PACKET_GET_TYPE(header);
+	pkt->count = CP_PACKET_GET_COUNT(header);
+	switch (pkt->type) {
+	case PACKET_TYPE0:
+		pkt->reg = CP_PACKET0_GET_REG(header);
+		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
+		break;
+	case PACKET_TYPE3:
+		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+		break;
+	case PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		return -EINVAL;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:	parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits:
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL + value
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, waitreloc;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the wait until */
+	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a wait until and only 1 count */
+	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+	    waitreloc.count != 0) {
+		DRM_ERROR("vline wait had illegal wait until segment\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
+		DRM_ERROR("vline wait had illegal wait until\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += waitreloc.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 5);
+	reg = CP_PACKET0_GET_REG(header);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
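+	/* a PACKET0 header carries the target register dword address in its
+	 * low bits, so retargeting the wait to CRTC2 only needs the old
+	 * register masked out and the CRTC2 register address OR-ed in >> 2 */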
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the wait until */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			return -EINVAL;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+	}
+
+	return 0;
+}
+
+/**
+ * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:	where to store the relocation information
+ *
+ * Check that the next packet is a relocation packet3 and return the
+ * relocation so the caller can validate the bo and compute the GPU offset.
+ **/
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+			      struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return r;
+	}
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		r100_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		r100_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
+	*cs_reloc = p->relocs_ptr[(idx / 4)];
+	return 0;
+}
+
+static int r100_get_vtx_size(uint32_t vtx_fmt)
+{
+	int vtx_size;
+	vtx_size = 2;
+	/* ordered according to bits in spec */
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt & (0x7 << 15))
+		vtx_size += (vtx_fmt >> 15) & 0x7;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
+		vtx_size++;
+	return vtx_size;
+}
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt,
+			      unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i, face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for out of
+		 * range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_TXOFFSET_0:
+	case RADEON_PP_TXOFFSET_1:
+	case RADEON_PP_TXOFFSET_2:
+		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T0_0:
+	case RADEON_PP_CUBIC_OFFSET_T0_1:
+	case RADEON_PP_CUBIC_OFFSET_T0_2:
+	case RADEON_PP_CUBIC_OFFSET_T0_3:
+	case RADEON_PP_CUBIC_OFFSET_T0_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[0].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T1_0:
+	case RADEON_PP_CUBIC_OFFSET_T1_1:
+	case RADEON_PP_CUBIC_OFFSET_T1_2:
+	case RADEON_PP_CUBIC_OFFSET_T1_3:
+	case RADEON_PP_CUBIC_OFFSET_T1_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[1].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T2_0:
+	case RADEON_PP_CUBIC_OFFSET_T2_1:
+	case RADEON_PP_CUBIC_OFFSET_T2_2:
+	case RADEON_PP_CUBIC_OFFSET_T2_3:
+	case RADEON_PP_CUBIC_OFFSET_T2_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[2].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case RADEON_SE_VTX_FMT:
+		track->vtx_size = r100_get_vtx_size(idx_value);
+		break;
+	case RADEON_PP_TEX_SIZE_0:
+	case RADEON_PP_TEX_SIZE_1:
+	case RADEON_PP_TEX_SIZE_2:
+		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TEX_PITCH_0:
+	case RADEON_PP_TEX_PITCH_1:
+	case RADEON_PP_TEX_PITCH_2:
+		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFILTER_0:
+	case RADEON_PP_TXFILTER_1:
+	case RADEON_PP_TXFILTER_2:
+		i = (reg - RADEON_PP_TXFILTER_0) / 24;
+		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
+						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFORMAT_0:
+	case RADEON_PP_TXFORMAT_1:
+	case RADEON_PP_TXFORMAT_2:
+		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
+		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+			track->textures[i].tex_coord_type = 2;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case RADEON_TXFORMAT_I8:
+		case RADEON_TXFORMAT_RGB332:
+		case RADEON_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_AI88:
+		case RADEON_TXFORMAT_ARGB1555:
+		case RADEON_TXFORMAT_RGB565:
+		case RADEON_TXFORMAT_ARGB4444:
+		case RADEON_TXFORMAT_VYUY422:
+		case RADEON_TXFORMAT_YVYU422:
+		case RADEON_TXFORMAT_SHADOW16:
+		case RADEON_TXFORMAT_LDUDV655:
+		case RADEON_TXFORMAT_DUDV88:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_ARGB8888:
+		case RADEON_TXFORMAT_RGBA8888:
+		case RADEON_TXFORMAT_SHADOW32:
+		case RADEON_TXFORMAT_LDUDUV8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case RADEON_TXFORMAT_DXT23:
+		case RADEON_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_FACES_0:
+	case RADEON_PP_CUBIC_FACES_1:
+	case RADEON_PP_CUBIC_FACES_2:
+		tmp = idx_value;
+		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj)
+{
+	unsigned idx;
+	u32 value;
+	idx = pkt->idx + 1;
+	value = radeon_get_ib_value(p, idx + 2);
+	if ((value + 1) > radeon_bo_size(robj)) {
+		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
+			  "(need %u have %lu) !\n",
+			  value + 1,
+			  radeon_bo_size(robj));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	unsigned idx;
+	volatile uint32_t *ib;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch (pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	case 0x23:
+		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+		track->num_arrays = 1;
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
+
+		track->arrays[0].robj = reloc->robj;
+		track->arrays[0].esize = track->vtx_size;
+
+		track->max_indx = radeon_get_ib_value(p, idx+1);
+
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+	case PACKET3_3D_DRAW_IMMD:
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_IMMD_2:
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
+	if (!track)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = r100_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			if (p->rdev->family >= CHIP_R200)
+				r = r100_cs_parse_packet0(p, &pkt,
+							  p->rdev->config.r100.reg_safe_bm,
+							  p->rdev->config.r100.reg_safe_bm_size,
+							  &r200_packet0_check);
+			else
+				r = r100_cs_parse_packet0(p, &pkt,
+							  p->rdev->config.r100.reg_safe_bm,
+							  p->rdev->config.r100.reg_safe_bm_size,
+							  &r100_packet0_check);
+			break;
+		case PACKET_TYPE2:
+			break;
+		case PACKET_TYPE3:
+			r = r100_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n",
+				  pkt.type);
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	free(p->track, DRM_MEM_DRIVER);
+	p->track = NULL;
+	return 0;
+}
+
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+	DRM_ERROR("pitch                      %d\n", t->pitch);
+	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
+	DRM_ERROR("width                      %d\n", t->width);
+	DRM_ERROR("width_11                   %d\n", t->width_11);
+	DRM_ERROR("height                     %d\n", t->height);
+	DRM_ERROR("height_11                  %d\n", t->height_11);
+	DRM_ERROR("num levels                 %d\n", t->num_levels);
+	DRM_ERROR("depth                      %d\n", t->txdepth);
+	DRM_ERROR("bpp                        %d\n", t->cpp);
+	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
+	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
+	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+	DRM_ERROR("compress format            %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+	int block_width, block_height, block_bytes;
+	int wblocks, hblocks;
+	int min_wblocks;
+	int sz;
+
+	block_width = 4;
+	block_height = 4;
+
+	switch (compress_format) {
+	case R100_TRACK_COMP_DXT1:
+		block_bytes = 8;
+		min_wblocks = 4;
+		break;
+	default:
+	case R100_TRACK_COMP_DXT35:
+		block_bytes = 16;
+		min_wblocks = 2;
+		break;
+	}
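+	/* e.g. a 64x64 DXT1 texture is 16x16 blocks of 8 bytes = 2048 bytes;
+	 * the same texture as DXT3/5 needs 16 bytes per block = 4096 bytes */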
+
+	hblocks = (h + block_height - 1) / block_height;
+	wblocks = (w + block_width - 1) / block_width;
+	if (wblocks < min_wblocks)
+		wblocks = min_wblocks;
+	sz = wblocks * hblocks * block_bytes;
+	return sz;
+}
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+			      struct r100_cs_track *track, unsigned idx)
+{
+	unsigned face, w, h;
+	struct radeon_bo *cube_robj;
+	unsigned long size;
+	unsigned compress_format = track->textures[idx].compress_format;
+
+	for (face = 0; face < 5; face++) {
+		cube_robj = track->textures[idx].cube_info[face].robj;
+		w = track->textures[idx].cube_info[face].width;
+		h = track->textures[idx].cube_info[face].height;
+
+		if (compress_format) {
+			size = r100_track_compress_size(compress_format, w, h);
+		} else
+			size = w * h;
+		size *= track->textures[idx].cpp;
+
+		size += track->textures[idx].cube_info[face].offset;
+
+		if (size > radeon_bo_size(cube_robj)) {
+			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+				  size, radeon_bo_size(cube_robj));
+			r100_cs_track_texture_print(&track->textures[idx]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+				       struct r100_cs_track *track)
+{
+	struct radeon_bo *robj;
+	unsigned long size;
+	unsigned u, i, w, h, d;
+	int ret;
+
+	for (u = 0; u < track->num_texture; u++) {
+		if (!track->textures[u].enabled)
+			continue;
+		if (track->textures[u].lookup_disable)
+			continue;
+		robj = track->textures[u].robj;
+		if (robj == NULL) {
+			DRM_ERROR("No texture bound to unit %u\n", u);
+			return -EINVAL;
+		}
+		size = 0;
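+		/* accumulate every mip level: level i halves width and height
+		 * (w / (1 << i)), optionally rounding up to a power of two */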
+		for (i = 0; i <= track->textures[u].num_levels; i++) {
+			if (track->textures[u].use_pitch) {
+				if (rdev->family < CHIP_R300)
+					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+				else
+					w = track->textures[u].pitch / (1 << i);
+			} else {
+				w = track->textures[u].width;
+				if (rdev->family >= CHIP_RV515)
+					w |= track->textures[u].width_11;
+				w = w / (1 << i);
+				if (track->textures[u].roundup_w)
+					w = roundup_pow_of_two(w);
+			}
+			h = track->textures[u].height;
+			if (rdev->family >= CHIP_RV515)
+				h |= track->textures[u].height_11;
+			h = h / (1 << i);
+			if (track->textures[u].roundup_h)
+				h = roundup_pow_of_two(h);
+			if (track->textures[u].tex_coord_type == 1) {
+				d = (1 << track->textures[u].txdepth) / (1 << i);
+				if (!d)
+					d = 1;
+			} else {
+				d = 1;
+			}
+			if (track->textures[u].compress_format) {
+
+				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+				/* compressed textures are block based */
+			} else
+				size += w * h * d;
+		}
+		size *= track->textures[u].cpp;
+
+		switch (track->textures[u].tex_coord_type) {
+		case 0:
+		case 1:
+			break;
+		case 2:
+			if (track->separate_cube) {
+				ret = r100_cs_track_cube(rdev, track, u);
+				if (ret)
+					return ret;
+			} else
+				size *= 6;
+			break;
+		default:
+			DRM_ERROR("Invalid texture coordinate type %u for unit "
+				  "%u\n", track->textures[u].tex_coord_type, u);
+			return -EINVAL;
+		}
+		if (size > radeon_bo_size(robj)) {
+			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+				  "%lu\n", u, size, radeon_bo_size(robj));
+			r100_cs_track_texture_print(&track->textures[u]);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i;
+	unsigned long size;
+	unsigned prim_walk;
+	unsigned nverts;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+	    !track->blend_read_enable)
+		num_cb = 0;
+
+	for (i = 0; i < num_cb; i++) {
+		if (track->cb[i].robj == NULL) {
+			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+			return -EINVAL;
+		}
+		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+		size += track->cb[i].offset;
+		if (size > radeon_bo_size(track->cb[i].robj)) {
+			DRM_ERROR("[drm] Buffer too small for color buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->cb[i].robj));
+			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+				  i, track->cb[i].pitch, track->cb[i].cpp,
+				  track->cb[i].offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
+		if (track->zb.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for z buffer !\n");
+			return -EINVAL;
+		}
+		size = track->zb.pitch * track->zb.cpp * track->maxy;
+		size += track->zb.offset;
+		if (size > radeon_bo_size(track->zb.robj)) {
+			DRM_ERROR("[drm] Buffer too small for z buffer "
+				  "(need %lu have %lu) !\n", size,
+				  radeon_bo_size(track->zb.robj));
+			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+				  track->zb.pitch, track->zb.cpp,
+				  track->zb.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
+	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+	if (track->vap_vf_cntl & (1 << 14)) {
+		nverts = track->vap_alt_nverts;
+	} else {
+		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+	}
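+	/* prim_walk selects where vertex data comes from: 1 = indexed
+	 * arrays (bounded by max_indx), 2 = sequential arrays (bounded by
+	 * nverts), 3 = vertex data embedded in the packet (immd) */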
+	switch (prim_walk) {
+	case 1:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * track->max_indx * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				DRM_ERROR("Max indices %u\n", track->max_indx);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 2:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * (nverts - 1) * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 3:
+		size = track->vtx_size * nverts;
+		if (size != track->immd_dwords) {
+			DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
+				  track->immd_dwords, size);
+			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+				  nverts, track->vtx_size);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+			  prim_walk);
+		return -EINVAL;
+	}
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
+}
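+
+/*
+ * The tracker is driven from the command-stream parser: clear it, let the
+ * packet checkers fill it in, then validate. A minimal sketch of that
+ * flow, inside a CS parsing function (illustrative fragment only; the
+ * parser entry points are declared in r100_track.h):
+ */
+#if 0
+	struct r100_cs_track track;
+	int r;
+
+	r100_cs_track_clear(rdev, &track);
+	/* ... packet0 checkers record CB/ZB/texture state into &track ... */
+	r = r100_cs_track_check(rdev, &track);
+	if (r)
+		return r;	/* command stream references undersized BOs */
+#endif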
+
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i, face;
+
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
+	if (rdev->family < CHIP_R300) {
+		track->num_cb = 1;
+		if (rdev->family <= CHIP_RS200)
+			track->num_texture = 3;
+		else
+			track->num_texture = 6;
+		track->maxy = 2048;
+		track->separate_cube = 1;
+	} else {
+		track->num_cb = 4;
+		track->num_texture = 16;
+		track->maxy = 4096;
+		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
+	}
+
+	for (i = 0; i < track->num_cb; i++) {
+		track->cb[i].robj = NULL;
+		track->cb[i].pitch = 8192;
+		track->cb[i].cpp = 16;
+		track->cb[i].offset = 0;
+	}
+	track->z_enabled = true;
+	track->zb.robj = NULL;
+	track->zb.pitch = 8192;
+	track->zb.cpp = 4;
+	track->zb.offset = 0;
+	track->vtx_size = 0x7F;
+	track->immd_dwords = 0xFFFFFFFFUL;
+	track->num_arrays = 11;
+	track->max_indx = 0x00FFFFFFUL;
+	for (i = 0; i < track->num_arrays; i++) {
+		track->arrays[i].robj = NULL;
+		track->arrays[i].esize = 0x7F;
+	}
+	for (i = 0; i < track->num_texture; i++) {
+		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+		track->textures[i].pitch = 16536;
+		track->textures[i].width = 16536;
+		track->textures[i].height = 16536;
+		track->textures[i].width_11 = 1 << 11;
+		track->textures[i].height_11 = 1 << 11;
+		track->textures[i].num_levels = 12;
+		if (rdev->family <= CHIP_RS200) {
+			track->textures[i].tex_coord_type = 0;
+			track->textures[i].txdepth = 0;
+		} else {
+			track->textures[i].txdepth = 16;
+			track->textures[i].tex_coord_type = 1;
+		}
+		track->textures[i].cpp = 64;
+		track->textures[i].robj = NULL;
+		/* CS IB emission code makes sure texture units are disabled */
+		track->textures[i].enabled = false;
+		track->textures[i].lookup_disable = false;
+		track->textures[i].roundup_w = true;
+		track->textures[i].roundup_h = true;
+		if (track->separate_cube)
+			for (face = 0; face < 5; face++) {
+				track->textures[i].cube_info[face].robj = NULL;
+				track->textures[i].cube_info[face].width = 16536;
+				track->textures[i].cube_info[face].height = 16536;
+				track->textures[i].cube_info[face].offset = 0;
+			}
+	}
+}
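+
+/*
+ * The defaults above are deliberately worst case (maximum pitch, cpp,
+ * element sizes and index counts) so that any state the command stream
+ * fails to program explicitly still trips the size checks rather than
+ * slipping through.
+ */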
+
+/*
+ * Global GPU functions
+ */
+static void r100_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+	}
+
+	if (rdev->family == CHIP_RV100 ||
+	    rdev->family == CHIP_RS100 ||
+	    rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+	}
+}
+
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+		if (tmp >= n) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+		DRM_ERROR("radeon: wait for empty RBBM fifo failed !"
+		       " Bad things might happen.\n");
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS);
+		if (!(tmp & RADEON_RBBM_ACTIVE)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 rbbm_status;
+
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	/* Enable bus mastering */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+}
+
+void r100_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	tmp = RREG32(R_000030_BUS_CNTL);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	mdelay(1);
+	pci_disable_busmaster(rdev->dev);
+	mdelay(1);
+}
+
+int r100_asic_reset(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(device_get_parent(rdev->dev));
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+					S_0000F0_SOFT_RESET_RE(1) |
+					S_0000F0_SOFT_RESET_PP(1) |
+					S_0000F0_SOFT_RESET_RB(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(device_get_parent(rdev->dev));
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
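+
+/*
+ * The reset recipe above boils down to: stop the MC clients and the CP,
+ * save PCI state, disable bus mastering, soft-reset the fixed-function
+ * engines (SE/RE/PP/RB) and then the CP separately, restore PCI state
+ * and bus mastering, and finally judge success from the busy bits before
+ * resuming the MC.
+ */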
+
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	bool force_dac2 = false;
+	u32 tmp;
+
+	/* set these so they don't interfere with anything */
+	WREG32(RADEON_OV0_SCALE_CNTL, 0);
+	WREG32(RADEON_SUBPIC_CNTL, 0);
+	WREG32(RADEON_VIPH_CONTROL, 0);
+	WREG32(RADEON_I2C_CNTL_1, 0);
+	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+	/* always set up dac2 on rn50 and some rv100 as lots
+	 * of servers seem to wire it up to a VGA port but
+	 * don't report it in the bios connector
+	 * table.
+	 */
+	switch (dev->pci_device) {
+		/* RN50 */
+	case 0x515e:
+	case 0x5969:
+		force_dac2 = true;
+		break;
+		/* RV100*/
+	case 0x5159:
+	case 0x515a:
+		/* DELL triple head servers */
+		if ((dev->pci_subvendor == 0x1028 /* DELL */) &&
+		    ((dev->pci_subdevice == 0x016c) ||
+		     (dev->pci_subdevice == 0x016d) ||
+		     (dev->pci_subdevice == 0x016e) ||
+		     (dev->pci_subdevice == 0x016f) ||
+		     (dev->pci_subdevice == 0x0170) ||
+		     (dev->pci_subdevice == 0x017d) ||
+		     (dev->pci_subdevice == 0x017e) ||
+		     (dev->pci_subdevice == 0x0183) ||
+		     (dev->pci_subdevice == 0x018a) ||
+		     (dev->pci_subdevice == 0x019a)))
+			force_dac2 = true;
+		break;
+	}
+
+	if (force_dac2) {
+		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+		/* For CRT on DAC2, don't turn it on if the BIOS didn't
+		   enable it, even if it's detected.
+		*/
+
+		/* force it to crtc0 */
+		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+		/* set up the TV DAC */
+		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+				 RADEON_TV_DAC_STD_MASK |
+				 RADEON_TV_DAC_RDACPD |
+				 RADEON_TV_DAC_GDACPD |
+				 RADEON_TV_DAC_BDACPD |
+				 RADEON_TV_DAC_BGADJ_MASK |
+				 RADEON_TV_DAC_DACADJ_MASK);
+		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+				RADEON_TV_DAC_NHOLD |
+				RADEON_TV_DAC_STD_PS2 |
+				(0x58 << 16));
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	/* switch PM block to ACPI mode */
+	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+	tmp &= ~RADEON_PM_MODE_SEL;
+	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+}
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_is_ddr = false;
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->mc.vram_is_ddr = true;
+	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+		rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RV100_HALF_MODE) {
+			rdev->mc.vram_width = 32;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			rdev->mc.vram_width /= 4;
+			rdev->mc.vram_is_ddr = true;
+		}
+	} else if (rdev->family <= CHIP_RV280) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+			rdev->mc.vram_width = 128;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+	} else {
+		/* newer IGPs */
+		rdev->mc.vram_width = 128;
+	}
+}
+
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
+{
+	u32 aper_size;
+	u8 byte;
+
+	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
+	 * that is, ones that have the 2nd generation multifunction PCI
+	 * interface.
+	 */
+	if (rdev->family == CHIP_RV280 ||
+	    rdev->family >= CHIP_RV350) {
+		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+		       ~RADEON_HDP_APER_CNTL);
+		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+		return aper_size * 2;
+	}
+
+	/* Older cards have all sorts of funny issues to deal with. First
+	 * check if it's a multifunction card by reading the PCI config
+	 * header type... Limit those to one aperture size
+	 */
+	byte = pci_read_config(rdev->dev, 0xe, 1);
+	if (byte & 0x80) {
+		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+		DRM_INFO("Limiting VRAM to one aperture\n");
+		return aper_size;
+	}
+
+	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
+	 * has set it up. We don't write this as it's broken on some ASICs but
+	 * we expect the BIOS to have done the right thing (might be too optimistic...)
+	 */
+	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+		return aper_size * 2;
+	return aper_size;
+}
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+	u64 config_aper_size;
+
+	/* work out accessible VRAM */
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+	/* FIXME we don't use the second aperture yet when we could use it */
+	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+		rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+	if (rdev->flags & RADEON_IS_IGP) {
+		uint32_t tom;
+		/* read NB_TOM to get the amount of ram stolen for the GPU */
+		tom = RREG32(RADEON_NB_TOM);
+		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	} else {
+		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+		/* Some production boards of m6 will report 0
+		 * if it's 8 MB
+		 */
+		if (rdev->mc.real_vram_size == 0) {
+			rdev->mc.real_vram_size = 8192 * 1024;
+			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		}
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
+		 * Novell bug 204882, along with lots of Ubuntu ones
+		 */
+		if (rdev->mc.aper_size > config_aper_size)
+			config_aper_size = rdev->mc.aper_size;
+
+		if (config_aper_size > rdev->mc.real_vram_size)
+			rdev->mc.mc_vram_size = config_aper_size;
+		else
+			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
+}
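+
+/*
+ * Worked NB_TOM example: an IGP with 64MB stolen at 0x38000000 reports
+ * top = 0x3bff and bottom = 0x3800 (both in 64KB units), so
+ * real_vram_size = ((0x3bff - 0x3800) + 1) << 16 = 0x04000000 = 64MB.
+ */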
+
+void r100_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(RADEON_CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~RADEON_CFG_VGA_RAM_EN;
+		temp |= RADEON_CFG_VGA_IO_DIS;
+	} else {
+		temp &= ~RADEON_CFG_VGA_IO_DIS;
+	}
+	WREG32(RADEON_CONFIG_CNTL, temp);
+}
+
+static void r100_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	r100_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
+		(void)RREG32(RADEON_CRTC_GEN_CNTL);
+	}
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+	/* This workaround is necessary on RV100, RS100 and RS200 chips,
+	 * or the chip could hang on a subsequent access.
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+		mdelay(5);
+	}
+
+	/* This function is required to work around a hardware bug in some (all?)
+	 * revisions of the R300.  This workaround should be called after every
+	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
+	 * may not be correct.
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+		uint32_t save, tmp;
+
+		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+	}
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t data;
+
+	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+	r100_pll_errata_after_index(rdev);
+	data = RREG32(RADEON_CLOCK_CNTL_DATA);
+	r100_pll_errata_after_data(rdev);
+	return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+	r100_pll_errata_after_index(rdev);
+	WREG32(RADEON_CLOCK_CNTL_DATA, v);
+	r100_pll_errata_after_data(rdev);
+}
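+
+/*
+ * Typical use of the indirect PLL accessors above (illustrative fragment
+ * only; the RREG32_PLL/WREG32_PLL macros used elsewhere in this file
+ * presumably wrap these):
+ */
+#if 0
+	uint32_t tmp;
+
+	tmp = r100_pll_rreg(rdev, RADEON_PLL_PWRMGT_CNTL);
+	tmp &= ~RADEON_PM_MODE_SEL;
+	r100_pll_wreg(rdev, RADEON_PLL_PWRMGT_CNTL, tmp);
+#endif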
+
+static void r100_set_safe_registers(struct radeon_device *rdev)
+{
+	if (ASIC_IS_RN50(rdev)) {
+		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
+	} else if (rdev->family < CHIP_R200) {
+		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
+	} else {
+		r200_set_safe_registers(rdev);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t reg, value;
+	unsigned i;
+
+	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	for (i = 0; i < 64; i++) {
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t rdp, wdp;
+	unsigned count, i, j;
+
+	radeon_ring_free_size(rdev, ring);
+	rdp = RREG32(RADEON_CP_RB_RPTR);
+	wdp = RREG32(RADEON_CP_RB_WPTR);
+	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+	for (j = 0; j <= count; j++) {
+		i = (rdp + j) & ring->ptr_mask;
+		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t csq_stat, csq2_stat, tmp;
+	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+	unsigned i;
+
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+	r_rptr = (csq_stat >> 0) & 0x3ff;
+	r_wptr = (csq_stat >> 10) & 0x3ff;
+	ib1_rptr = (csq_stat >> 20) & 0x3ff;
+	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+	seq_printf(m, "Ring rptr %u\n", r_rptr);
+	seq_printf(m, "Ring wptr %u\n", r_wptr);
+	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
+	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
+	seq_printf(m, "Ring fifo:\n");
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect1 fifo:\n");
+	for (i = 256; i <= 512; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect2 fifo:\n");
+	for (i = 640; i < ib2_wptr; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_FB_LOCATION);
+	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_AGP_LOCATION);
+	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AGP_BASE);
+	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(0x01D0);
+	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_LO_ADDR);
+	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_HI_ADDR);
+	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(0x01E4);
+	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	int surf_index = reg * 16;
+	int flags = 0;
+
+	if (rdev->family <= CHIP_RS200) {
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+			flags |= RADEON_SURF_TILE_COLOR_BOTH;
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= RADEON_SURF_TILE_COLOR_MACRO;
+	} else if (rdev->family <= CHIP_RV280) {
+		if (tiling_flags & (RADEON_TILING_MACRO))
+			flags |= R200_SURF_TILE_COLOR_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R200_SURF_TILE_COLOR_MICRO;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= R300_SURF_TILE_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R300_SURF_TILE_MICRO;
+	}
+
+	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
+		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
+	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
+		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
+
+	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
+	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
+		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
+			if (ASIC_IS_RN50(rdev))
+				pitch /= 16;
+	}
+
+	/* r100/r200 divide by 16 */
+	if (rdev->family < CHIP_R300)
+		flags |= pitch / 16;
+	else
+		flags |= pitch / 8;
+
+	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
+	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
+	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
+	return 0;
+}
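+
+/*
+ * Pitch encoding example for the low bits of SURFACE*_INFO written above:
+ * a 1024-byte pitch encodes as 1024 / 16 = 64 on r100/r200 and as
+ * 1024 / 8 = 128 on r300 and newer; the RN50 byte-swapped linear case
+ * divides the pitch by a further 16 first.
+ */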
+
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	int surf_index = reg * 16;
+	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
+}
+
+void r100_bandwidth_update(struct radeon_device *rdev)
+{
+	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+	fixed20_12 memtcas_ff[8] = {
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init(0),
+	};
+	fixed20_12 memtcas_rs480_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init_half(3),
+	};
+	fixed20_12 memtcas2_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+	};
+	fixed20_12 memtrbs[8] = {
+		dfixed_init(1),
+		dfixed_init_half(1),
+		dfixed_init(2),
+		dfixed_init_half(2),
+		dfixed_init(3),
+		dfixed_init_half(3),
+		dfixed_init(4),
+		dfixed_init_half(4)
+	};
+	fixed20_12 memtrbs_r4xx[8] = {
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+		dfixed_init(8),
+		dfixed_init(9),
+		dfixed_init(10),
+		dfixed_init(11)
+	};
+	fixed20_12 min_mem_eff;
+	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+	fixed20_12 cur_latency_mclk, cur_latency_sclk;
+	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+		disp_drain_rate2, read_return_rate;
+	fixed20_12 time_disp1_drop_priority;
+	int c;
+	int cur_size = 16;       /* in octawords */
+	int critical_point = 0, critical_point2;
+/* 	uint32_t read_return_rate, time_disp1_drop_priority; */
+	int stop_req, max_stop_req;
+	struct drm_display_mode *mode1 = NULL;
+	struct drm_display_mode *mode2 = NULL;
+	uint32_t pixel_bytes1 = 0;
+	uint32_t pixel_bytes2 = 0;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled) {
+		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
+		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+	}
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		if (rdev->mode_info.crtcs[1]->base.enabled) {
+			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+		}
+	}
+
+	min_mem_eff.full = dfixed_const_8(0);
+	/* get modes */
+	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		/* check crtc enables */
+		if (mode2)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode1)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+	}
+
+	/*
+	 * determine if there is enough bandwidth for the current mode
+	 */
+	sclk_ff = rdev->pm.sclk;
+	mclk_ff = rdev->pm.mclk;
+
+	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+	temp_ff.full = dfixed_const(temp);
+	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
+
+	pix_clk.full = 0;
+	pix_clk2.full = 0;
+	peak_disp_bw.full = 0;
+	if (mode1) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+		pix_clk.full = dfixed_div(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes1);
+		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
+	}
+	if (mode2) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes2);
+		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
+	}
+
+	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
+	if (peak_disp_bw.full >= mem_bw.full) {
+		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
+			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
+	}
+
+	/*  Get values from the EXT_MEM_CNTL register...converting its contents. */
+	temp = RREG32(RADEON_MEM_TIMING_CNTL);
+	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+		mem_trcd = ((temp >> 2) & 0x3) + 1;
+		mem_trp  = ((temp & 0x3)) + 1;
+		mem_tras = ((temp & 0x70) >> 4) + 1;
+	} else if (rdev->family == CHIP_R300 ||
+		   rdev->family == CHIP_R350) { /* r300, r350 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 11) & 0xf) + 4;
+	} else if (rdev->family == CHIP_RV350 ||
+		   rdev->family <= CHIP_RV380) {
+		/* rv3x0 */
+		mem_trcd = (temp & 0x7) + 3;
+		mem_trp = ((temp >> 8) & 0x7) + 3;
+		mem_tras = ((temp >> 11) & 0xf) + 6;
+	} else if (rdev->family == CHIP_R420 ||
+		   rdev->family == CHIP_R423 ||
+		   rdev->family == CHIP_RV410) {
+		/* r4xx */
+		mem_trcd = (temp & 0xf) + 3;
+		if (mem_trcd > 15)
+			mem_trcd = 15;
+		mem_trp = ((temp >> 8) & 0xf) + 3;
+		if (mem_trp > 15)
+			mem_trp = 15;
+		mem_tras = ((temp >> 12) & 0x1f) + 6;
+		if (mem_tras > 31)
+			mem_tras = 31;
+	} else { /* RV200, R200 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 12) & 0xf) + 4;
+	}
+	/* convert to FF */
+	trcd_ff.full = dfixed_const(mem_trcd);
+	trp_ff.full = dfixed_const(mem_trp);
+	tras_ff.full = dfixed_const(mem_tras);
+
+	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
+	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+	data = (temp & (7 << 20)) >> 20;
+	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_RS480) /* don't think rs400 */
+			tcas_ff = memtcas_rs480_ff[data];
+		else
+			tcas_ff = memtcas_ff[data];
+	} else
+		tcas_ff = memtcas2_ff[data];
+
+	if (rdev->family == CHIP_RS400 ||
+	    rdev->family == CHIP_RS480) {
+		/* extra cas latency stored in bits 23-25, 0-4 clocks */
+		data = (temp >> 23) & 0x7;
+		if (data < 5)
+			tcas_ff.full += dfixed_const(data);
+	}
+
+	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+		/* on the R300, Tcas is included in Trbs.
+		 */
+		temp = RREG32(RADEON_MEM_CNTL);
+		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+		if (data == 1) {
+			if (R300_MEM_USE_CD_CH_ONLY & temp) {
+				temp = RREG32(R300_MC_IND_INDEX);
+				temp &= ~R300_MC_IND_ADDR_MASK;
+				temp |= R300_MC_READ_CNTL_CD_mcind;
+				WREG32(R300_MC_IND_INDEX, temp);
+				temp = RREG32(R300_MC_IND_DATA);
+				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+			} else {
+				temp = RREG32(R300_MC_READ_CNTL_AB);
+				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+			}
+		} else {
+			temp = RREG32(R300_MC_READ_CNTL_AB);
+			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+		}
+		if (rdev->family == CHIP_RV410 ||
+		    rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423)
+			trbs_ff = memtrbs_r4xx[data];
+		else
+			trbs_ff = memtrbs[data];
+		tcas_ff.full += trbs_ff.full;
+	}
+
+	sclk_eff_ff.full = sclk_ff.full;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		fixed20_12 agpmode_ff;
+		agpmode_ff.full = dfixed_const(radeon_agpmode);
+		temp_ff.full = dfixed_const_666(16);
+		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
+	}
+	/* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+	if (ASIC_IS_R300(rdev)) {
+		sclk_delay_ff.full = dfixed_const(250);
+	} else {
+		if ((rdev->family == CHIP_RV100) ||
+		    rdev->flags & RADEON_IS_IGP) {
+			if (rdev->mc.vram_is_ddr)
+				sclk_delay_ff.full = dfixed_const(41);
+			else
+				sclk_delay_ff.full = dfixed_const(33);
+		} else {
+			if (rdev->mc.vram_width == 128)
+				sclk_delay_ff.full = dfixed_const(57);
+			else
+				sclk_delay_ff.full = dfixed_const(41);
+		}
+	}
+
+	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+	if (rdev->mc.vram_is_ddr) {
+		if (rdev->mc.vram_width == 32) {
+			k1.full = dfixed_const(40);
+			c  = 3;
+		} else {
+			k1.full = dfixed_const(20);
+			c  = 1;
+		}
+	} else {
+		k1.full = dfixed_const(40);
+		c  = 3;
+	}
+
+	temp_ff.full = dfixed_const(2);
+	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+	temp_ff.full = dfixed_const(c);
+	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+	temp_ff.full = dfixed_const(4);
+	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
+	mc_latency_mclk.full += k1.full;
+
+	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
+
+	/*
+	  HW cursor time assuming worst case of full size colour cursor.
+	*/
+	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+	temp_ff.full += trcd_ff.full;
+	if (temp_ff.full < tras_ff.full)
+		temp_ff.full = tras_ff.full;
+	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
+
+	temp_ff.full = dfixed_const(cur_size);
+	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
+	/*
+	  Find the total latency for the display data.
+	*/
+	disp_latency_overhead.full = dfixed_const(8);
+	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
+	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+	if (mc_latency_mclk.full > mc_latency_sclk.full)
+		disp_latency.full = mc_latency_mclk.full;
+	else
+		disp_latency.full = mc_latency_sclk.full;
+
+	/* setup Max GRPH_STOP_REQ default value */
+	if (ASIC_IS_RV100(rdev))
+		max_stop_req = 0x5c;
+	else
+		max_stop_req = 0x7c;
+
+	if (mode1) {
+		/*  CRTC1
+		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+		*/
+		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes1));
+		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
+
+		/*
+		  Find the critical point of the display buffer.
+		*/
+		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+		crit_point_ff.full += dfixed_const_half(0);
+
+		critical_point = dfixed_trunc(crit_point_ff);
+
+		if (rdev->disp_priority == 2) {
+			critical_point = 0;
+		}
+
+		/*
+		  The critical point should never be above max_stop_req-4.  Setting
+		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+		*/
+		if (max_stop_req - critical_point < 4)
+			critical_point = 0;
+
+		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problems with this set to 0 when CRTC2 is enabled. */
+			critical_point = 0x10;
+		}
+
+		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		temp &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		temp |= RADEON_GRPH_BUFFER_SIZE;
+		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+		/*
+		  Write the result into the register.
+		*/
+		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			/* attempt to program RS400 disp regs correctly ??? */
+			temp = RREG32(RS400_DISP1_REG_CNTL);
+			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DMIF_MEM_CNTL1);
+			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+		}
+#endif
+
+		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
+			  /* 	  (unsigned int)info->SavedReg->grph_buffer_cntl, */
+			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+	}
+
+	if (mode2) {
+		u32 grph2_cntl;
+		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes2));
+		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
+
+		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+
+		if ((rdev->family == CHIP_RS100) ||
+		    (rdev->family == CHIP_RS200))
+			critical_point2 = 0;
+		else {
+			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+			temp_ff.full = dfixed_const(temp);
+			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
+			if (sclk_ff.full < temp_ff.full)
+				temp_ff.full = sclk_ff.full;
+
+			read_return_rate.full = temp_ff.full;
+
+			if (mode1) {
+				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
+			} else {
+				time_disp1_drop_priority.full = 0;
+			}
+			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+			crit_point_ff.full += dfixed_const_half(0);
+
+			critical_point2 = dfixed_trunc(crit_point_ff);
+
+			if (rdev->disp_priority == 2) {
+				critical_point2 = 0;
+			}
+
+			if (max_stop_req - critical_point2 < 4)
+				critical_point2 = 0;
+
+		}
+
+		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problems with this set to 0 */
+			critical_point2 = 0x10;
+		}
+
+		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+#if 0
+			/* attempt to program RS400 disp2 regs correctly ??? */
+			temp = RREG32(RS400_DISP2_REQ_CNTL1);
+			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DISP2_REQ_CNTL2);
+			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
+			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+		}
+
+		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+	}
+}
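+
+/*
+ * The bandwidth math above is done in 20.12 fixed point. Assuming the
+ * usual dfixed_* helpers (a .full value holding x << 12), a quick sketch
+ * of the arithmetic:
+ */
+#if 0
+	fixed20_12 a, b, r;
+
+	a.full = dfixed_const(3);	/* 3.0 -> 0x3000 */
+	b.full = dfixed_const_half(0);	/* 0.5 -> 0x0800 */
+	r.full = dfixed_mul(a, b);	/* 1.5 -> 0x1800 */
+	/* dfixed_trunc(r) == 1, i.e. truncation toward zero */
+#endif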
+
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET0(scratch, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test succeeded in %d usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
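+
+/*
+ * The test above is the standard scratch-register handshake: seed the
+ * scratch with 0xCAFEDEAD, ask the CP to write 0xDEADBEEF to it through
+ * the ring, then poll until the value flips or usec_timeout expires. The
+ * IB test further down reuses the same idea through an indirect buffer.
+ */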
+
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	if (ring->rptr_save_reg) {
+		u32 next_rptr = ring->wptr + 2 + 3;
+		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, ib->length_dw);
+}
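+
+/*
+ * PACKET0(reg, n) emits a type-0 packet that writes n + 1 consecutive
+ * dwords starting at reg, so PACKET0(RADEON_CP_IB_BASE, 1) followed by
+ * the IB address and length programs CP_IB_BASE and the adjacent
+ * CP_IB_BUFSZ register in one go.
+ */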
+
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test succeeded in %u usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Shut down the CP. We shouldn't need to do that, but better safe
+	 * than sorry.
+	 */
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(R_000740_CP_CSQ_CNTL, 0);
+
+	/* Save few CRTC registers */
+	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
+	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
+	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
+	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
+		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
+	}
+
+	/* Disable VGA aperture access */
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
+	/* Disable cursor, overlay, crtc */
+	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
+					S_000054_CRTC_DISPLAY_DIS(1));
+	WREG32(R_000050_CRTC_GEN_CNTL,
+			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
+			S_000050_CRTC_DISP_REQ_EN_B(1));
+	WREG32(R_000420_OV0_SCALE_CNTL,
+		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
+	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
+						S_000360_CUR2_LOCK(1));
+		WREG32(R_0003F8_CRTC2_GEN_CNTL,
+			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
+			S_0003F8_CRTC2_DISPLAY_DIS(1) |
+			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
+		WREG32(R_000360_CUR2_OFFSET,
+			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
+	}
+}
+
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Update base address for crtc */
+	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	}
+	/* Restore CRTC registers */
+	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
+	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
+	}
+}
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG8(R_0003C2_GENMO_WT);
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r)
+		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2,
+				upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r100_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+	/* Program the MC; it should be a 32-bit limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
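+
+/*
+ * MC_FB_LOCATION packs start and top in 64KB units (assuming the usual
+ * start-in-low-word/top-in-high-word layout of the S_000148_* macros):
+ * 64MB of VRAM at offset 0, i.e. vram_start = 0x00000000 and
+ * vram_end = 0x03ffffff, encodes as 0x03ff0000.
+ */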
+
+static void r100_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force on some of the blocks */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r100_mc_program(rdev);
+	/* Resume clock */
+	r100_clock_startup(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r100_enable_bm(rdev);
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r100_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not working */
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r100_clock_startup(rdev);
+	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	r100_cp_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+/*
+ * Due to how kexec works, it can leave the hw fully initialised when it
+ * boots the new kernel. However, doing our init sequence with the CP and
+ * WB stuff set up causes GPU hangs, on the RN50 at least. So at startup
+ * do some quick sanity checks and restore sane values to avoid this
+ * problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(RADEON_CP_CSQ_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_CSQ_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_RB_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_SCRATCH_UMSK);
+	if (tmp) {
+		WREG32(RADEON_SCRATCH_UMSK, 0);
+	}
+}
+
+int r100_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Register debugfs file specific to this group of asics */
+	r100_debugfs(rdev);
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* sanity check some registers to avoid hangs like after kexec */
+	r100_restore_sanity(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Set asic errata */
+	r100_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize VRAM */
+	r100_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r100_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect)
+{
+	if (reg < rdev->rmmio_size && !always_indirect)
+		return bus_read_4(rdev->rmmio, reg);
+	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
+		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
+		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);
+
+		return ret;
+	}
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect)
+{
+	if (reg < rdev->rmmio_size && !always_indirect)
+		bus_write_4(rdev->rmmio, reg, v);
+	else {
+		unsigned long flags;
+
+		DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
+		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
+		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);
+	}
+}
+
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+	if (reg < rdev->rio_mem_size)
+		return bus_read_4(rdev->rio_mem, reg);
+	else {
+		/* XXX No locking? -- dumbbell@ */
+		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
+		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
+	}
+}
+
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	if (reg < rdev->rio_mem_size)
+		bus_write_4(rdev->rio_mem, reg, v);
+	else {
+		/* XXX No locking? -- dumbbell@ */
+		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
+		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
+	}
+}
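+
+/*
+ * Both the MMIO and the I/O accessors above fall back to the same
+ * MM_INDEX/MM_DATA window when a register lies beyond the mapped
+ * aperture: write the register offset to MM_INDEX, then read or write
+ * MM_DATA. Only the MMIO path takes the index lock, which is what the
+ * XXX notes on the I/O path are flagging.
+ */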


Property changes on: trunk/sys/dev/drm2/radeon/r100.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r100_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r100_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r100_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,32 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r100_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned r100_reg_safe_bm[102] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFCF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFF9F, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x38E7FE1F, 0xFFC3FF8E, 0x7FF8FFFF, 0xFFFF803C,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFEFFFF, 0xFFFFFFFF,
+	0x00000000, 0xFFFFFFFD, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFCFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFEF,
+};
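+
+/*
+ * One bit per dword-aligned register: the CS parser indexes this table
+ * roughly as sketched below, and a set bit means the register write must
+ * go through the per-ASIC packet0 checker (the helper name here is
+ * illustrative, not part of the driver):
+ */
+#if 0
+static bool r100_reg_needs_check(const unsigned *bm, unsigned reg)
+{
+	unsigned idx = reg >> 2;	/* register offsets are dword aligned */
+
+	return (bm[idx >> 5] & (1u << (idx & 31))) != 0;
+}
+#endif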


Property changes on: trunk/sys/dev/drm2/radeon/r100_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
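
Editorial sketch, not part of the commit: r100_reg_safe_bm carries one bit
per 32-bit register (32 registers per table word), so the 102 words above
cover register offsets 0x0000-0x32FC.  In the Linux-derived command-stream
checker a set bit routes the register through the chip-specific packet0
check callback (which may patch in a relocation or reject the write), while
a clear bit lets the write pass untouched.  The indexing works out to:

	/* Illustrative helper; the index math follows r100_cs_parse_packet0(). */
	static inline bool
	r100_reg_is_checked(unsigned reg)
	{
		unsigned word = reg >> 7;		/* (reg / 4) / 32 */
		unsigned bit = (reg >> 2) & 31;		/* (reg / 4) % 32 */

		if (word >= 102)
			return true;	/* outside the table: always check */
		return (r100_reg_safe_bm[word] & (1u << bit)) != 0;
	}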
Added: trunk/sys/dev/drm2/radeon/r100_track.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r100_track.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r100_track.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,104 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r100_track.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define R100_TRACK_MAX_TEXTURE 3
+#define R200_TRACK_MAX_TEXTURE 6
+#define R300_TRACK_MAX_TEXTURE 16
+
+#define R100_MAX_CB 1
+#define R300_MAX_CB 4
+
+/*
+ * CS functions
+ */
+struct r100_cs_track_cb {
+	struct radeon_bo	*robj;
+	unsigned		pitch;
+	unsigned		cpp;
+	unsigned		offset;
+};
+
+struct r100_cs_track_array {
+	struct radeon_bo	*robj;
+	unsigned		esize;
+};
+
+struct r100_cs_cube_info {
+	struct radeon_bo	*robj;
+	unsigned		offset;
+	unsigned		width;
+	unsigned		height;
+};
+
+#define R100_TRACK_COMP_NONE   0
+#define R100_TRACK_COMP_DXT1   1
+#define R100_TRACK_COMP_DXT35  2
+
+struct r100_cs_track_texture {
+	struct radeon_bo	*robj;
+	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
+	unsigned		pitch;
+	unsigned		width;
+	unsigned		height;
+	unsigned		num_levels;
+	unsigned		cpp;
+	unsigned		tex_coord_type;
+	unsigned		txdepth;
+	unsigned		width_11;
+	unsigned		height_11;
+	bool			use_pitch;
+	bool			enabled;
+	bool                    lookup_disable;
+	bool			roundup_w;
+	bool			roundup_h;
+	unsigned                compress_format;
+};
+
+struct r100_cs_track {
+	unsigned			num_cb;
+	unsigned                        num_texture;
+	unsigned			maxy;
+	unsigned			vtx_size;
+	unsigned			vap_vf_cntl;
+	unsigned			vap_alt_nverts;
+	unsigned			immd_dwords;
+	unsigned			num_arrays;
+	unsigned			max_indx;
+	unsigned			color_channel_mask;
+	struct r100_cs_track_array	arrays[16];
+	struct r100_cs_track_cb 	cb[R300_MAX_CB];
+	struct r100_cs_track_cb 	zb;
+	struct r100_cs_track_cb 	aa;
+	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
+	bool				z_enabled;
+	bool                            separate_cube;
+	bool				zb_cb_clear;
+	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
+};
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+			      struct radeon_cs_reloc **cs_reloc);
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt);
+
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg);
+
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg);
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx);


Property changes on: trunk/sys/dev/drm2/radeon/r100_track.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
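
Editorial sketch, not part of the commit: struct r100_cs_track accumulates
the state a submitted command stream programs (color buffers, z-buffer,
textures, vertex arrays) so that r100_cs_track_check() can verify every
draw stays inside its buffer objects before the stream reaches the GPU.
The color-buffer leg of that check has roughly this shape (illustrative;
radeon_bo_size() is the usual buffer-object size accessor):

	for (i = 0; i < track->num_cb; i++) {
		unsigned long size;

		if (track->cb[i].robj == NULL)
			return -EINVAL;	/* CB enabled but no BO bound */
		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy +
		    track->cb[i].offset;
		if (size > radeon_bo_size(track->cb[i].robj))
			return -EINVAL;	/* draw would overrun the BO */
	}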
Added: trunk/sys/dev/drm2/radeon/r100d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r100d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r100d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,884 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R100D_H__
+#define __R100D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r100d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
+
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+
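+/*
+ * (Editorial sketch, not part of the upstream header.)  PACKET0(reg, n)
+ * builds a type-0 header that writes n+1 consecutive dwords starting at
+ * register offset reg (REG_SET() is assumed from radeon.h); the
+ * CP_PACKET_GET_* macros invert the encoding:
+ *
+ *	header = PACKET0(R_000040_GEN_INT_CNTL, 0);
+ *	CP_PACKET_GET_TYPE(header)  == PACKET_TYPE0
+ *	CP_PACKET_GET_COUNT(header) == 0	(one payload dword)
+ *	CP_PACKET0_GET_REG(header)  == 0x000040
+ */
+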
+/* Registers */
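+/*
+ * (Editorial note.)  Each register field below comes as a triple:
+ * S_<reg>_<field>(x) shifts a value into the field, G_<reg>_<field>(x)
+ * extracts it from a register word, and C_<reg>_<field> is the AND-mask
+ * that clears it.  A read-modify-write therefore looks like (RREG32/WREG32
+ * per the usual radeon wrappers, assumed here):
+ *
+ *	tmp = RREG32(R_000040_GEN_INT_CNTL);
+ *	tmp &= C_000040_SW_INT_EN;
+ *	tmp |= S_000040_SW_INT_EN(1);
+ *	WREG32(R_000040_GEN_INT_CNTL, tmp);
+ */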
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_SE(x)                    (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_SE(x)                    (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_SE                       0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define R_000030_BUS_CNTL                            0x000030
+#define   S_000030_BUS_DBL_RESYNC(x)                   (((x) & 0x1) << 0)
+#define   G_000030_BUS_DBL_RESYNC(x)                   (((x) >> 0) & 0x1)
+#define   C_000030_BUS_DBL_RESYNC                      0xFFFFFFFE
+#define   S_000030_BUS_MSTR_RESET(x)                   (((x) & 0x1) << 1)
+#define   G_000030_BUS_MSTR_RESET(x)                   (((x) >> 1) & 0x1)
+#define   C_000030_BUS_MSTR_RESET                      0xFFFFFFFD
+#define   S_000030_BUS_FLUSH_BUF(x)                    (((x) & 0x1) << 2)
+#define   G_000030_BUS_FLUSH_BUF(x)                    (((x) >> 2) & 0x1)
+#define   C_000030_BUS_FLUSH_BUF                       0xFFFFFFFB
+#define   S_000030_BUS_STOP_REQ_DIS(x)                 (((x) & 0x1) << 3)
+#define   G_000030_BUS_STOP_REQ_DIS(x)                 (((x) >> 3) & 0x1)
+#define   C_000030_BUS_STOP_REQ_DIS                    0xFFFFFFF7
+#define   S_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) & 0x1) << 4)
+#define   G_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) >> 4) & 0x1)
+#define   C_000030_BUS_PM4_READ_COMBINE_EN             0xFFFFFFEF
+#define   S_000030_BUS_WRT_COMBINE_EN(x)               (((x) & 0x1) << 5)
+#define   G_000030_BUS_WRT_COMBINE_EN(x)               (((x) >> 5) & 0x1)
+#define   C_000030_BUS_WRT_COMBINE_EN                  0xFFFFFFDF
+#define   S_000030_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 6)
+#define   G_000030_BUS_MASTER_DIS(x)                   (((x) >> 6) & 0x1)
+#define   C_000030_BUS_MASTER_DIS                      0xFFFFFFBF
+#define   S_000030_BIOS_ROM_WRT_EN(x)                  (((x) & 0x1) << 7)
+#define   G_000030_BIOS_ROM_WRT_EN(x)                  (((x) >> 7) & 0x1)
+#define   C_000030_BIOS_ROM_WRT_EN                     0xFFFFFF7F
+#define   S_000030_BM_DAC_CRIPPLE(x)                   (((x) & 0x1) << 8)
+#define   G_000030_BM_DAC_CRIPPLE(x)                   (((x) >> 8) & 0x1)
+#define   C_000030_BM_DAC_CRIPPLE                      0xFFFFFEFF
+#define   S_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) & 0x1) << 9)
+#define   G_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) >> 9) & 0x1)
+#define   C_000030_BUS_NON_PM4_READ_COMBINE_EN         0xFFFFFDFF
+#define   S_000030_BUS_XFERD_DISCARD_EN(x)             (((x) & 0x1) << 10)
+#define   G_000030_BUS_XFERD_DISCARD_EN(x)             (((x) >> 10) & 0x1)
+#define   C_000030_BUS_XFERD_DISCARD_EN                0xFFFFFBFF
+#define   S_000030_BUS_SGL_READ_DISABLE(x)             (((x) & 0x1) << 11)
+#define   G_000030_BUS_SGL_READ_DISABLE(x)             (((x) >> 11) & 0x1)
+#define   C_000030_BUS_SGL_READ_DISABLE                0xFFFFF7FF
+#define   S_000030_BIOS_DIS_ROM(x)                     (((x) & 0x1) << 12)
+#define   G_000030_BIOS_DIS_ROM(x)                     (((x) >> 12) & 0x1)
+#define   C_000030_BIOS_DIS_ROM                        0xFFFFEFFF
+#define   S_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) & 0x1) << 13)
+#define   G_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) >> 13) & 0x1)
+#define   C_000030_BUS_PCI_READ_RETRY_EN               0xFFFFDFFF
+#define   S_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) & 0x1) << 14)
+#define   G_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) >> 14) & 0x1)
+#define   C_000030_BUS_AGP_AD_STEPPING_EN              0xFFFFBFFF
+#define   S_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) & 0x1) << 15)
+#define   G_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) >> 15) & 0x1)
+#define   C_000030_BUS_PCI_WRT_RETRY_EN                0xFFFF7FFF
+#define   S_000030_BUS_RETRY_WS(x)                     (((x) & 0xF) << 16)
+#define   G_000030_BUS_RETRY_WS(x)                     (((x) >> 16) & 0xF)
+#define   C_000030_BUS_RETRY_WS                        0xFFF0FFFF
+#define   S_000030_BUS_MSTR_RD_MULT(x)                 (((x) & 0x1) << 20)
+#define   G_000030_BUS_MSTR_RD_MULT(x)                 (((x) >> 20) & 0x1)
+#define   C_000030_BUS_MSTR_RD_MULT                    0xFFEFFFFF
+#define   S_000030_BUS_MSTR_RD_LINE(x)                 (((x) & 0x1) << 21)
+#define   G_000030_BUS_MSTR_RD_LINE(x)                 (((x) >> 21) & 0x1)
+#define   C_000030_BUS_MSTR_RD_LINE                    0xFFDFFFFF
+#define   S_000030_BUS_SUSPEND(x)                      (((x) & 0x1) << 22)
+#define   G_000030_BUS_SUSPEND(x)                      (((x) >> 22) & 0x1)
+#define   C_000030_BUS_SUSPEND                         0xFFBFFFFF
+#define   S_000030_LAT_16X(x)                          (((x) & 0x1) << 23)
+#define   G_000030_LAT_16X(x)                          (((x) >> 23) & 0x1)
+#define   C_000030_LAT_16X                             0xFF7FFFFF
+#define   S_000030_BUS_RD_DISCARD_EN(x)                (((x) & 0x1) << 24)
+#define   G_000030_BUS_RD_DISCARD_EN(x)                (((x) >> 24) & 0x1)
+#define   C_000030_BUS_RD_DISCARD_EN                   0xFEFFFFFF
+#define   S_000030_ENFRCWRDY(x)                        (((x) & 0x1) << 25)
+#define   G_000030_ENFRCWRDY(x)                        (((x) >> 25) & 0x1)
+#define   C_000030_ENFRCWRDY                           0xFDFFFFFF
+#define   S_000030_BUS_MSTR_WS(x)                      (((x) & 0x1) << 26)
+#define   G_000030_BUS_MSTR_WS(x)                      (((x) >> 26) & 0x1)
+#define   C_000030_BUS_MSTR_WS                         0xFBFFFFFF
+#define   S_000030_BUS_PARKING_DIS(x)                  (((x) & 0x1) << 27)
+#define   G_000030_BUS_PARKING_DIS(x)                  (((x) >> 27) & 0x1)
+#define   C_000030_BUS_PARKING_DIS                     0xF7FFFFFF
+#define   S_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) & 0x1) << 28)
+#define   G_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) >> 28) & 0x1)
+#define   C_000030_BUS_MSTR_DISCONNECT_EN              0xEFFFFFFF
+#define   S_000030_SERR_EN(x)                          (((x) & 0x1) << 29)
+#define   G_000030_SERR_EN(x)                          (((x) >> 29) & 0x1)
+#define   C_000030_SERR_EN                             0xDFFFFFFF
+#define   S_000030_BUS_READ_BURST(x)                   (((x) & 0x1) << 30)
+#define   G_000030_BUS_READ_BURST(x)                   (((x) >> 30) & 0x1)
+#define   C_000030_BUS_READ_BURST                      0xBFFFFFFF
+#define   S_000030_BUS_RDY_READ_DLY(x)                 (((x) & 0x1) << 31)
+#define   G_000030_BUS_RDY_READ_DLY(x)                 (((x) >> 31) & 0x1)
+#define   C_000030_BUS_RDY_READ_DLY                    0x7FFFFFFF
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_CRTC_VBLANK(x)                      (((x) & 0x1) << 0)
+#define   G_000040_CRTC_VBLANK(x)                      (((x) >> 0) & 0x1)
+#define   C_000040_CRTC_VBLANK                         0xFFFFFFFE
+#define   S_000040_CRTC_VLINE(x)                       (((x) & 0x1) << 1)
+#define   G_000040_CRTC_VLINE(x)                       (((x) >> 1) & 0x1)
+#define   C_000040_CRTC_VLINE                          0xFFFFFFFD
+#define   S_000040_CRTC_VSYNC(x)                       (((x) & 0x1) << 2)
+#define   G_000040_CRTC_VSYNC(x)                       (((x) >> 2) & 0x1)
+#define   C_000040_CRTC_VSYNC                          0xFFFFFFFB
+#define   S_000040_SNAPSHOT(x)                         (((x) & 0x1) << 3)
+#define   G_000040_SNAPSHOT(x)                         (((x) >> 3) & 0x1)
+#define   C_000040_SNAPSHOT                            0xFFFFFFF7
+#define   S_000040_FP_DETECT(x)                        (((x) & 0x1) << 4)
+#define   G_000040_FP_DETECT(x)                        (((x) >> 4) & 0x1)
+#define   C_000040_FP_DETECT                           0xFFFFFFEF
+#define   S_000040_CRTC2_VLINE(x)                      (((x) & 0x1) << 5)
+#define   G_000040_CRTC2_VLINE(x)                      (((x) >> 5) & 0x1)
+#define   C_000040_CRTC2_VLINE                         0xFFFFFFDF
+#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
+#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
+#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
+#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
+#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
+#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
+#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
+#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
+#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
+#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
+#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
+#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
+#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
+#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
+#define   C_000040_FP2_DETECT                          0xFFFFFBFF
+#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
+#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
+#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_CRTC_VBLANK_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT                    0xFFFFFFFE
+#define   S_000044_CRTC_VBLANK_STAT_AK(x)              (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT_AK(x)              (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT_AK                 0xFFFFFFFE
+#define   S_000044_CRTC_VLINE_STAT(x)                  (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT(x)                  (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT                     0xFFFFFFFD
+#define   S_000044_CRTC_VLINE_STAT_AK(x)               (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT_AK(x)               (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT_AK                  0xFFFFFFFD
+#define   S_000044_CRTC_VSYNC_STAT(x)                  (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT(x)                  (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT                     0xFFFFFFFB
+#define   S_000044_CRTC_VSYNC_STAT_AK(x)               (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT_AK(x)               (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT_AK                  0xFFFFFFFB
+#define   S_000044_SNAPSHOT_STAT(x)                    (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT(x)                    (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT                       0xFFFFFFF7
+#define   S_000044_SNAPSHOT_STAT_AK(x)                 (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT_AK(x)                 (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT_AK                    0xFFFFFFF7
+#define   S_000044_FP_DETECT_STAT(x)                   (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT(x)                   (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT                      0xFFFFFFEF
+#define   S_000044_FP_DETECT_STAT_AK(x)                (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT_AK(x)                (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT_AK                   0xFFFFFFEF
+#define   S_000044_CRTC2_VLINE_STAT(x)                 (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT(x)                 (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT                    0xFFFFFFDF
+#define   S_000044_CRTC2_VLINE_STAT_AK(x)              (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT_AK(x)              (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT_AK                 0xFFFFFFDF
+#define   S_000044_CRTC2_VSYNC_STAT(x)                 (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT(x)                 (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT                    0xFFFFFFBF
+#define   S_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT_AK                 0xFFFFFFBF
+#define   S_000044_SNAPSHOT2_STAT(x)                   (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT(x)                   (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT                      0xFFFFFF7F
+#define   S_000044_SNAPSHOT2_STAT_AK(x)                (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT_AK(x)                (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT_AK                   0xFFFFFF7F
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_CRTC2_VBLANK_STAT(x)                (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT(x)                (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT                   0xFFFFFDFF
+#define   S_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT_AK                0xFFFFFDFF
+#define   S_000044_FP2_DETECT_STAT(x)                  (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT(x)                  (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT                     0xFFFFFBFF
+#define   S_000044_FP2_DETECT_STAT_AK(x)               (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT_AK(x)               (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT_AK                  0xFFFFFBFF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT          0xFFFFF7FF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK       0xFFFFF7FF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH0_INT_AK(x)                 (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT_AK(x)                 (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT_AK                    0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH1_INT_AK(x)                 (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT_AK(x)                 (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT_AK                    0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH2_INT_AK(x)                 (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT_AK(x)                 (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT_AK                    0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_DMA_VIPH3_INT_AK(x)                 (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT_AK(x)                 (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT_AK                    0xFFFF7FFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_I2C_INT_AK(x)                       (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT_AK(x)                       (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT_AK                          0xFFFDFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_GUI_IDLE_STAT_AK(x)                 (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT_AK(x)                 (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT_AK                    0xFFF7FFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_AK(x)                        (((x) & 0x1) << 25)
+#define   G_000044_SW_INT_AK(x)                        (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT_AK                           0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_GEYSERVILLE_STAT(x)                 (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT(x)                 (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT                    0xF7FFFFFF
+#define   S_000044_GEYSERVILLE_STAT_AK(x)              (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT_AK(x)              (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT_AK                 0xF7FFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_STAT            0xEFFFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_AK              0xEFFFFFFF
+#define   S_000044_DVI_I2C_INT_STAT(x)                 (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_STAT(x)                 (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_STAT                    0xDFFFFFFF
+#define   S_000044_DVI_I2C_INT_AK(x)                   (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_AK(x)                   (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_AK                      0xDFFFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_GUIDMA_AK(x)                        (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_AK(x)                        (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_AK                           0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define   S_000044_VIDDMA_AK(x)                        (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_AK(x)                        (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_AK                           0x7FFFFFFF
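+/*
+ * (Editorial note.)  The *_STAT/*_AK pairs above alias the same bit
+ * positions: reading R_000044_GEN_INT_STATUS returns the latched events,
+ * and writing 1 back to a bit acknowledges (clears) that event, e.g.
+ *
+ *	tmp = RREG32(R_000044_GEN_INT_STATUS);
+ *	WREG32(R_000044_GEN_INT_STATUS, tmp);
+ */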
+#define R_000050_CRTC_GEN_CNTL                       0x000050
+#define   S_000050_CRTC_DBL_SCAN_EN(x)                 (((x) & 0x1) << 0)
+#define   G_000050_CRTC_DBL_SCAN_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_000050_CRTC_DBL_SCAN_EN                    0xFFFFFFFE
+#define   S_000050_CRTC_INTERLACE_EN(x)                (((x) & 0x1) << 1)
+#define   G_000050_CRTC_INTERLACE_EN(x)                (((x) >> 1) & 0x1)
+#define   C_000050_CRTC_INTERLACE_EN                   0xFFFFFFFD
+#define   S_000050_CRTC_C_SYNC_EN(x)                   (((x) & 0x1) << 4)
+#define   G_000050_CRTC_C_SYNC_EN(x)                   (((x) >> 4) & 0x1)
+#define   C_000050_CRTC_C_SYNC_EN                      0xFFFFFFEF
+#define   S_000050_CRTC_PIX_WIDTH(x)                   (((x) & 0xF) << 8)
+#define   G_000050_CRTC_PIX_WIDTH(x)                   (((x) >> 8) & 0xF)
+#define   C_000050_CRTC_PIX_WIDTH                      0xFFFFF0FF
+#define   S_000050_CRTC_ICON_EN(x)                     (((x) & 0x1) << 15)
+#define   G_000050_CRTC_ICON_EN(x)                     (((x) >> 15) & 0x1)
+#define   C_000050_CRTC_ICON_EN                        0xFFFF7FFF
+#define   S_000050_CRTC_CUR_EN(x)                      (((x) & 0x1) << 16)
+#define   G_000050_CRTC_CUR_EN(x)                      (((x) >> 16) & 0x1)
+#define   C_000050_CRTC_CUR_EN                         0xFFFEFFFF
+#define   S_000050_CRTC_VSTAT_MODE(x)                  (((x) & 0x3) << 17)
+#define   G_000050_CRTC_VSTAT_MODE(x)                  (((x) >> 17) & 0x3)
+#define   C_000050_CRTC_VSTAT_MODE                     0xFFF9FFFF
+#define   S_000050_CRTC_CUR_MODE(x)                    (((x) & 0x7) << 20)
+#define   G_000050_CRTC_CUR_MODE(x)                    (((x) >> 20) & 0x7)
+#define   C_000050_CRTC_CUR_MODE                       0xFF8FFFFF
+#define   S_000050_CRTC_EXT_DISP_EN(x)                 (((x) & 0x1) << 24)
+#define   G_000050_CRTC_EXT_DISP_EN(x)                 (((x) >> 24) & 0x1)
+#define   C_000050_CRTC_EXT_DISP_EN                    0xFEFFFFFF
+#define   S_000050_CRTC_EN(x)                          (((x) & 0x1) << 25)
+#define   G_000050_CRTC_EN(x)                          (((x) >> 25) & 0x1)
+#define   C_000050_CRTC_EN                             0xFDFFFFFF
+#define   S_000050_CRTC_DISP_REQ_EN_B(x)               (((x) & 0x1) << 26)
+#define   G_000050_CRTC_DISP_REQ_EN_B(x)               (((x) >> 26) & 0x1)
+#define   C_000050_CRTC_DISP_REQ_EN_B                  0xFBFFFFFF
+#define R_000054_CRTC_EXT_CNTL                       0x000054
+#define   S_000054_CRTC_VGA_XOVERSCAN(x)               (((x) & 0x1) << 0)
+#define   G_000054_CRTC_VGA_XOVERSCAN(x)               (((x) >> 0) & 0x1)
+#define   C_000054_CRTC_VGA_XOVERSCAN                  0xFFFFFFFE
+#define   S_000054_VGA_BLINK_RATE(x)                   (((x) & 0x3) << 1)
+#define   G_000054_VGA_BLINK_RATE(x)                   (((x) >> 1) & 0x3)
+#define   C_000054_VGA_BLINK_RATE                      0xFFFFFFF9
+#define   S_000054_VGA_ATI_LINEAR(x)                   (((x) & 0x1) << 3)
+#define   G_000054_VGA_ATI_LINEAR(x)                   (((x) >> 3) & 0x1)
+#define   C_000054_VGA_ATI_LINEAR                      0xFFFFFFF7
+#define   S_000054_VGA_128KAP_PAGING(x)                (((x) & 0x1) << 4)
+#define   G_000054_VGA_128KAP_PAGING(x)                (((x) >> 4) & 0x1)
+#define   C_000054_VGA_128KAP_PAGING                   0xFFFFFFEF
+#define   S_000054_VGA_TEXT_132(x)                     (((x) & 0x1) << 5)
+#define   G_000054_VGA_TEXT_132(x)                     (((x) >> 5) & 0x1)
+#define   C_000054_VGA_TEXT_132                        0xFFFFFFDF
+#define   S_000054_VGA_XCRT_CNT_EN(x)                  (((x) & 0x1) << 6)
+#define   G_000054_VGA_XCRT_CNT_EN(x)                  (((x) >> 6) & 0x1)
+#define   C_000054_VGA_XCRT_CNT_EN                     0xFFFFFFBF
+#define   S_000054_CRTC_HSYNC_DIS(x)                   (((x) & 0x1) << 8)
+#define   G_000054_CRTC_HSYNC_DIS(x)                   (((x) >> 8) & 0x1)
+#define   C_000054_CRTC_HSYNC_DIS                      0xFFFFFEFF
+#define   S_000054_CRTC_VSYNC_DIS(x)                   (((x) & 0x1) << 9)
+#define   G_000054_CRTC_VSYNC_DIS(x)                   (((x) >> 9) & 0x1)
+#define   C_000054_CRTC_VSYNC_DIS                      0xFFFFFDFF
+#define   S_000054_CRTC_DISPLAY_DIS(x)                 (((x) & 0x1) << 10)
+#define   G_000054_CRTC_DISPLAY_DIS(x)                 (((x) >> 10) & 0x1)
+#define   C_000054_CRTC_DISPLAY_DIS                    0xFFFFFBFF
+#define   S_000054_CRTC_SYNC_TRISTATE(x)               (((x) & 0x1) << 11)
+#define   G_000054_CRTC_SYNC_TRISTATE(x)               (((x) >> 11) & 0x1)
+#define   C_000054_CRTC_SYNC_TRISTATE                  0xFFFFF7FF
+#define   S_000054_CRTC_HSYNC_TRISTATE(x)              (((x) & 0x1) << 12)
+#define   G_000054_CRTC_HSYNC_TRISTATE(x)              (((x) >> 12) & 0x1)
+#define   C_000054_CRTC_HSYNC_TRISTATE                 0xFFFFEFFF
+#define   S_000054_CRTC_VSYNC_TRISTATE(x)              (((x) & 0x1) << 13)
+#define   G_000054_CRTC_VSYNC_TRISTATE(x)              (((x) >> 13) & 0x1)
+#define   C_000054_CRTC_VSYNC_TRISTATE                 0xFFFFDFFF
+#define   S_000054_CRT_ON(x)                           (((x) & 0x1) << 15)
+#define   G_000054_CRT_ON(x)                           (((x) >> 15) & 0x1)
+#define   C_000054_CRT_ON                              0xFFFF7FFF
+#define   S_000054_VGA_CUR_B_TEST(x)                   (((x) & 0x1) << 17)
+#define   G_000054_VGA_CUR_B_TEST(x)                   (((x) >> 17) & 0x1)
+#define   C_000054_VGA_CUR_B_TEST                      0xFFFDFFFF
+#define   S_000054_VGA_PACK_DIS(x)                     (((x) & 0x1) << 18)
+#define   G_000054_VGA_PACK_DIS(x)                     (((x) >> 18) & 0x1)
+#define   C_000054_VGA_PACK_DIS                        0xFFFBFFFF
+#define   S_000054_VGA_MEM_PS_EN(x)                    (((x) & 0x1) << 19)
+#define   G_000054_VGA_MEM_PS_EN(x)                    (((x) >> 19) & 0x1)
+#define   C_000054_VGA_MEM_PS_EN                       0xFFF7FFFF
+#define   S_000054_VCRTC_IDX_MASTER(x)                 (((x) & 0x7F) << 24)
+#define   G_000054_VCRTC_IDX_MASTER(x)                 (((x) >> 24) & 0x7F)
+#define   C_000054_VCRTC_IDX_MASTER                    0x80FFFFFF
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
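+/*
+ * (Editorial note.)  The MC_FB/MC_AGP start and top fields hold bits 31:16
+ * of the aperture addresses, so programming the VRAM window looks roughly
+ * like this (illustrative, after the Linux r100_mc_program()):
+ *
+ *	WREG32(R_000148_MC_FB_LOCATION,
+ *	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ *	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ */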
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_00023C_DISPLAY_BASE_ADDR                   0x00023C
+#define   S_00023C_DISPLAY_BASE_ADDR(x)                (((x) & 0xFFFFFFFF) << 0)
+#define   G_00023C_DISPLAY_BASE_ADDR(x)                (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00023C_DISPLAY_BASE_ADDR                   0x00000000
+#define R_000260_CUR_OFFSET                          0x000260
+#define   S_000260_CUR_OFFSET(x)                       (((x) & 0x7FFFFFF) << 0)
+#define   G_000260_CUR_OFFSET(x)                       (((x) >> 0) & 0x7FFFFFF)
+#define   C_000260_CUR_OFFSET                          0xF8000000
+#define   S_000260_CUR_LOCK(x)                         (((x) & 0x1) << 31)
+#define   G_000260_CUR_LOCK(x)                         (((x) >> 31) & 0x1)
+#define   C_000260_CUR_LOCK                            0x7FFFFFFF
+#define R_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00033C
+#define   S_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00000000
+#define R_000360_CUR2_OFFSET                         0x000360
+#define   S_000360_CUR2_OFFSET(x)                      (((x) & 0x7FFFFFF) << 0)
+#define   G_000360_CUR2_OFFSET(x)                      (((x) >> 0) & 0x7FFFFFF)
+#define   C_000360_CUR2_OFFSET                         0xF8000000
+#define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
+#define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
+#define   C_000360_CUR2_LOCK                           0x7FFFFFFF
+#define R_0003C2_GENMO_WT                            0x0003C2
+#define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
+#define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
+#define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
+#define   S_0003C2_VGA_RAM_EN(x)                       (((x) & 0x1) << 1)
+#define   G_0003C2_VGA_RAM_EN(x)                       (((x) >> 1) & 0x1)
+#define   C_0003C2_VGA_RAM_EN                          0xFD
+#define   S_0003C2_VGA_CKSEL(x)                        (((x) & 0x3) << 2)
+#define   G_0003C2_VGA_CKSEL(x)                        (((x) >> 2) & 0x3)
+#define   C_0003C2_VGA_CKSEL                           0xF3
+#define   S_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) & 0x1) << 5)
+#define   G_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) >> 5) & 0x1)
+#define   C_0003C2_ODD_EVEN_MD_PGSEL                   0xDF
+#define   S_0003C2_VGA_HSYNC_POL(x)                    (((x) & 0x1) << 6)
+#define   G_0003C2_VGA_HSYNC_POL(x)                    (((x) >> 6) & 0x1)
+#define   C_0003C2_VGA_HSYNC_POL                       0xBF
+#define   S_0003C2_VGA_VSYNC_POL(x)                    (((x) & 0x1) << 7)
+#define   G_0003C2_VGA_VSYNC_POL(x)                    (((x) >> 7) & 0x1)
+#define   C_0003C2_VGA_VSYNC_POL                       0x7F
+#define R_0003F8_CRTC2_GEN_CNTL                      0x0003F8
+#define   S_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) & 0x1) << 0)
+#define   G_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) >> 0) & 0x1)
+#define   C_0003F8_CRTC2_DBL_SCAN_EN                   0xFFFFFFFE
+#define   S_0003F8_CRTC2_INTERLACE_EN(x)               (((x) & 0x1) << 1)
+#define   G_0003F8_CRTC2_INTERLACE_EN(x)               (((x) >> 1) & 0x1)
+#define   C_0003F8_CRTC2_INTERLACE_EN                  0xFFFFFFFD
+#define   S_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) & 0x1) << 4)
+#define   G_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) >> 4) & 0x1)
+#define   C_0003F8_CRTC2_SYNC_TRISTATE                 0xFFFFFFEF
+#define   S_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) & 0x1) << 5)
+#define   G_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) >> 5) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_TRISTATE                0xFFFFFFDF
+#define   S_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) & 0x1) << 6)
+#define   G_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) >> 6) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_TRISTATE                0xFFFFFFBF
+#define   S_0003F8_CRT2_ON(x)                          (((x) & 0x1) << 7)
+#define   G_0003F8_CRT2_ON(x)                          (((x) >> 7) & 0x1)
+#define   C_0003F8_CRT2_ON                             0xFFFFFF7F
+#define   S_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) & 0xF) << 8)
+#define   G_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) >> 8) & 0xF)
+#define   C_0003F8_CRTC2_PIX_WIDTH                     0xFFFFF0FF
+#define   S_0003F8_CRTC2_ICON_EN(x)                    (((x) & 0x1) << 15)
+#define   G_0003F8_CRTC2_ICON_EN(x)                    (((x) >> 15) & 0x1)
+#define   C_0003F8_CRTC2_ICON_EN                       0xFFFF7FFF
+#define   S_0003F8_CRTC2_CUR_EN(x)                     (((x) & 0x1) << 16)
+#define   G_0003F8_CRTC2_CUR_EN(x)                     (((x) >> 16) & 0x1)
+#define   C_0003F8_CRTC2_CUR_EN                        0xFFFEFFFF
+#define   S_0003F8_CRTC2_CUR_MODE(x)                   (((x) & 0x7) << 20)
+#define   G_0003F8_CRTC2_CUR_MODE(x)                   (((x) >> 20) & 0x7)
+#define   C_0003F8_CRTC2_CUR_MODE                      0xFF8FFFFF
+#define   S_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) & 0x1) << 23)
+#define   G_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) >> 23) & 0x1)
+#define   C_0003F8_CRTC2_DISPLAY_DIS                   0xFF7FFFFF
+#define   S_0003F8_CRTC2_EN(x)                         (((x) & 0x1) << 25)
+#define   G_0003F8_CRTC2_EN(x)                         (((x) >> 25) & 0x1)
+#define   C_0003F8_CRTC2_EN                            0xFDFFFFFF
+#define   S_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) & 0x1) << 26)
+#define   G_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) >> 26) & 0x1)
+#define   C_0003F8_CRTC2_DISP_REQ_EN_B                 0xFBFFFFFF
+#define   S_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) & 0x1) << 27)
+#define   G_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) >> 27) & 0x1)
+#define   C_0003F8_CRTC2_C_SYNC_EN                     0xF7FFFFFF
+#define   S_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) & 0x1) << 28)
+#define   G_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) >> 28) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_DIS                     0xEFFFFFFF
+#define   S_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) & 0x1) << 29)
+#define   G_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) >> 29) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_DIS                     0xDFFFFFFF
+#define R_000420_OV0_SCALE_CNTL                      0x000420
+#define   S_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) & 0x1) << 1)
+#define   G_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) >> 1) & 0x1)
+#define   C_000420_OV0_NO_READ_BEHIND_SCAN             0xFFFFFFFD
+#define   S_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) & 0x1) << 2)
+#define   G_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) >> 2) & 0x1)
+#define   C_000420_OV0_HORZ_PICK_NEAREST               0xFFFFFFFB
+#define   S_000420_OV0_VERT_PICK_NEAREST(x)            (((x) & 0x1) << 3)
+#define   G_000420_OV0_VERT_PICK_NEAREST(x)            (((x) >> 3) & 0x1)
+#define   C_000420_OV0_VERT_PICK_NEAREST               0xFFFFFFF7
+#define   S_000420_OV0_SIGNED_UV(x)                    (((x) & 0x1) << 4)
+#define   G_000420_OV0_SIGNED_UV(x)                    (((x) >> 4) & 0x1)
+#define   C_000420_OV0_SIGNED_UV                       0xFFFFFFEF
+#define   S_000420_OV0_GAMMA_SEL(x)                    (((x) & 0x7) << 5)
+#define   G_000420_OV0_GAMMA_SEL(x)                    (((x) >> 5) & 0x7)
+#define   C_000420_OV0_GAMMA_SEL                       0xFFFFFF1F
+#define   S_000420_OV0_SURFACE_FORMAT(x)               (((x) & 0xF) << 8)
+#define   G_000420_OV0_SURFACE_FORMAT(x)               (((x) >> 8) & 0xF)
+#define   C_000420_OV0_SURFACE_FORMAT                  0xFFFFF0FF
+#define   S_000420_OV0_ADAPTIVE_DEINT(x)               (((x) & 0x1) << 12)
+#define   G_000420_OV0_ADAPTIVE_DEINT(x)               (((x) >> 12) & 0x1)
+#define   C_000420_OV0_ADAPTIVE_DEINT                  0xFFFFEFFF
+#define   S_000420_OV0_CRTC_SEL(x)                     (((x) & 0x1) << 14)
+#define   G_000420_OV0_CRTC_SEL(x)                     (((x) >> 14) & 0x1)
+#define   C_000420_OV0_CRTC_SEL                        0xFFFFBFFF
+#define   S_000420_OV0_BURST_PER_PLANE(x)              (((x) & 0x7F) << 16)
+#define   G_000420_OV0_BURST_PER_PLANE(x)              (((x) >> 16) & 0x7F)
+#define   C_000420_OV0_BURST_PER_PLANE                 0xFF80FFFF
+#define   S_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) & 0x1) << 24)
+#define   G_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) >> 24) & 0x1)
+#define   C_000420_OV0_DOUBLE_BUFFER_REGS              0xFEFFFFFF
+#define   S_000420_OV0_BANDWIDTH(x)                    (((x) & 0x1) << 26)
+#define   G_000420_OV0_BANDWIDTH(x)                    (((x) >> 26) & 0x1)
+#define   C_000420_OV0_BANDWIDTH                       0xFBFFFFFF
+#define   S_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) & 0x1) << 28)
+#define   G_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) >> 28) & 0x1)
+#define   C_000420_OV0_LIN_TRANS_BYPASS                0xEFFFFFFF
+#define   S_000420_OV0_INT_EMU(x)                      (((x) & 0x1) << 29)
+#define   G_000420_OV0_INT_EMU(x)                      (((x) >> 29) & 0x1)
+#define   C_000420_OV0_INT_EMU                         0xDFFFFFFF
+#define   S_000420_OV0_OVERLAY_EN(x)                   (((x) & 0x1) << 30)
+#define   G_000420_OV0_OVERLAY_EN(x)                   (((x) >> 30) & 0x1)
+#define   C_000420_OV0_OVERLAY_EN                      0xBFFFFFFF
+#define   S_000420_OV0_SOFT_RESET(x)                   (((x) & 0x1) << 31)
+#define   G_000420_OV0_SOFT_RESET(x)                   (((x) >> 31) & 0x1)
+#define   C_000420_OV0_SOFT_RESET                      0x7FFFFFFF
+#define R_00070C_CP_RB_RPTR_ADDR                     0x00070C
+#define   S_00070C_RB_RPTR_SWAP(x)                     (((x) & 0x3) << 0)
+#define   G_00070C_RB_RPTR_SWAP(x)                     (((x) >> 0) & 0x3)
+#define   C_00070C_RB_RPTR_SWAP                        0xFFFFFFFC
+#define   S_00070C_RB_RPTR_ADDR(x)                     (((x) & 0x3FFFFFFF) << 2)
+#define   G_00070C_RB_RPTR_ADDR(x)                     (((x) >> 2) & 0x3FFFFFFF)
+#define   C_00070C_RB_RPTR_ADDR                        0x00000003
+#define R_000740_CP_CSQ_CNTL                         0x000740
+#define   S_000740_CSQ_CNT_PRIMARY(x)                  (((x) & 0xFF) << 0)
+#define   G_000740_CSQ_CNT_PRIMARY(x)                  (((x) >> 0) & 0xFF)
+#define   C_000740_CSQ_CNT_PRIMARY                     0xFFFFFF00
+#define   S_000740_CSQ_CNT_INDIRECT(x)                 (((x) & 0xFF) << 8)
+#define   G_000740_CSQ_CNT_INDIRECT(x)                 (((x) >> 8) & 0xFF)
+#define   C_000740_CSQ_CNT_INDIRECT                    0xFFFF00FF
+#define   S_000740_CSQ_MODE(x)                         (((x) & 0xF) << 28)
+#define   G_000740_CSQ_MODE(x)                         (((x) >> 28) & 0xF)
+#define   C_000740_CSQ_MODE                            0x0FFFFFFF
+#define R_000770_SCRATCH_UMSK                        0x000770
+#define   S_000770_SCRATCH_UMSK(x)                     (((x) & 0x3F) << 0)
+#define   G_000770_SCRATCH_UMSK(x)                     (((x) >> 0) & 0x3F)
+#define   C_000770_SCRATCH_UMSK                        0xFFFFFFC0
+#define   S_000770_SCRATCH_SWAP(x)                     (((x) & 0x3) << 16)
+#define   G_000770_SCRATCH_SWAP(x)                     (((x) >> 16) & 0x3)
+#define   C_000770_SCRATCH_SWAP                        0xFFFCFFFF
+#define R_000774_SCRATCH_ADDR                        0x000774
+#define   S_000774_SCRATCH_ADDR(x)                     (((x) & 0x7FFFFFF) << 5)
+#define   G_000774_SCRATCH_ADDR(x)                     (((x) >> 5) & 0x7FFFFFF)
+#define   C_000774_SCRATCH_ADDR                        0x0000001F
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_SE_BUSY(x)                          (((x) & 0x1) << 20)
+#define   G_000E40_SE_BUSY(x)                          (((x) >> 20) & 0x1)
+#define   C_000E40_SE_BUSY                             0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_TCLK_SRC_SEL(x)                     (((x) & 0x7) << 8)
+#define   G_00000D_TCLK_SRC_SEL(x)                     (((x) >> 8) & 0x7)
+#define   C_00000D_TCLK_SRC_SEL                        0xFFFFF8FF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP(x)                       (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP(x)                       (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP                          0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
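+
+/* The S_/G_/C_ macro triplets above follow one scheme: S_<reg>_<field>(x)
+ * shifts a field value into place, G_<reg>_<field>(x) extracts it, and
+ * C_<reg>_<field> is the AND-mask that clears the field.  A hedged
+ * read-modify-write sketch, assuming the usual RREG32/RREG32_PLL/
+ * WREG32_PLL accessors:
+ *
+ *	busy = G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS));
+ *	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ *	tmp = (tmp & C_00000D_FORCE_CP) | S_00000D_FORCE_CP(1);
+ *	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+ */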
+
+/* PLL regs */
+#define SCLK_CNTL                                      0xd
+#define   FORCE_HDP                                    (1 << 17)
+#define CLK_PWRMGT_CNTL                                0x14
+#define   GLOBAL_PMAN_EN                               (1 << 10)
+#define   DISP_PM                                      (1 << 20)
+#define PLL_PWRMGT_CNTL                                0x15
+#define   MPLL_TURNOFF                                 (1 << 0)
+#define   SPLL_TURNOFF                                 (1 << 1)
+#define   PPLL_TURNOFF                                 (1 << 2)
+#define   P2PLL_TURNOFF                                (1 << 3)
+#define   TVPLL_TURNOFF                                (1 << 4)
+#define   MOBILE_SU                                    (1 << 16)
+#define   SU_SCLK_USE_BCLK                             (1 << 17)
+#define SCLK_CNTL2                                     0x1e
+#define   REDUCED_SPEED_SCLK_MODE                      (1 << 16)
+#define   REDUCED_SPEED_SCLK_SEL(x)                    ((x) << 17)
+#define MCLK_MISC                                      0x1f
+#define   EN_MCLK_TRISTATE_IN_SUSPEND                  (1 << 18)
+#define SCLK_MORE_CNTL                                 0x35
+#define   REDUCED_SPEED_SCLK_EN                        (1 << 16)
+#define   IO_CG_VOLTAGE_DROP                           (1 << 17)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 20)
+#define   VOLTAGE_DROP_SYNC                            (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN                                   0xd08
+#define   DISP_D3_GRPH_RST                             (1 << 18)
+#define   DISP_D3_SUBPIC_RST                           (1 << 19)
+#define   DISP_D3_OV0_RST                              (1 << 20)
+#define   DISP_D1D2_GRPH_RST                           (1 << 21)
+#define   DISP_D1D2_SUBPIC_RST                         (1 << 22)
+#define   DISP_D1D2_OV0_RST                            (1 << 23)
+#define   DISP_DVO_ENABLE_RST                          (1 << 24)
+#define   TV_ENABLE_RST                                (1 << 25)
+#define   AUTO_PWRUP_EN                                (1 << 26)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r100d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r200.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r200.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r200.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,553 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r200.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "r100d.h"
+#include "r200_reg_safe.h"
+
+#include "r100_track.h"
+
+static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
+{
+	int vtx_size, i;
+	vtx_size = 2;
+
+	if (vtx_fmt_0 & R200_VTX_Z0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
+		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
+	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N0)
+		vtx_size += 3;
+	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
+		vtx_size++;
+	for (i = 0; i < 8; i++) {
+		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
+		switch (color_size) {
+		case 0: break;
+		case 1: vtx_size++; break;
+		case 2: vtx_size += 3; break;
+		case 3: vtx_size += 4; break;
+		}
+	}
+	if (vtx_fmt_0 & R200_VTX_XY1)
+		vtx_size += 2;
+	if (vtx_fmt_0 & R200_VTX_Z1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N1)
+		vtx_size += 3;
+	return vtx_size;
+}
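+
+/* Worked example for the format-0 sizing above: a format with R200_VTX_Z0,
+ * R200_VTX_N0 and one 4-component color (color_size == 3 in slot 0) gives
+ *	2 (XY) + 1 (Z0) + 3 (normal) + 4 (color) = 10 dwords per vertex.
+ */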
+
+int r200_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset,
+		  uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t size;
+	uint32_t cur_size;
+	int i, num_loops;
+	int r = 0;
+
+	/* radeon pitch is /64 */
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
+	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+	/* Must wait for 2D idle & clean before DMA or hangs might happen */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (1 << 16));
+	for (i = 0; i < num_loops; i++) {
+		cur_size = size;
+		if (cur_size > 0x1FFFFF) {
+			cur_size = 0x1FFFFF;
+		}
+		size -= cur_size;
+		radeon_ring_write(ring, PACKET0(0x720, 2));
+		radeon_ring_write(ring, src_offset);
+		radeon_ring_write(ring, dst_offset);
+		radeon_ring_write(ring, cur_size | (1U << 31) | (1 << 30));
+		src_offset += cur_size;
+		dst_offset += cur_size;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+	if (fence) {
+		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return r;
+}
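+
+/* Sizing sketch for the copy loop above (assuming the usual 4 KiB GPU
+ * pages, i.e. RADEON_GPU_PAGE_SHIFT == 12): copying 1024 pages gives
+ * size = 0x400000, so num_loops = DIV_ROUND_UP(0x400000, 0x1FFFFF) = 3,
+ * two full 0x1FFFFF-byte chunks plus a 2-byte tail, with the ring locked
+ * for 3 * 4 + 64 = 76 dwords.
+ */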
+
+
+static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
+{
+	int vtx_size, i, tex_size;
+	vtx_size = 0;
+	for (i = 0; i < 6; i++) {
+		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
+		if (tex_size > 4)
+			continue;
+		vtx_size += tex_size;
+	}
+	return vtx_size;
+}
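+
+/* Format 1 packs six 3-bit texcoord counts, one per texture unit: e.g.
+ * two 2-component sets (vtx_fmt_1 == 2 | (2 << 3)) add 4 dwords, while
+ * any count above 4 is skipped as disabled.
+ */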
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i;
+	int face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for out of
+		 * range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R200_PP_TXOFFSET_0:
+	case R200_PP_TXOFFSET_1:
+	case R200_PP_TXOFFSET_2:
+	case R200_PP_TXOFFSET_3:
+	case R200_PP_TXOFFSET_4:
+	case R200_PP_TXOFFSET_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R200_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R200_TXO_MICRO_TILE;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_OFFSET_F1_0:
+	case R200_PP_CUBIC_OFFSET_F2_0:
+	case R200_PP_CUBIC_OFFSET_F3_0:
+	case R200_PP_CUBIC_OFFSET_F4_0:
+	case R200_PP_CUBIC_OFFSET_F5_0:
+	case R200_PP_CUBIC_OFFSET_F1_1:
+	case R200_PP_CUBIC_OFFSET_F2_1:
+	case R200_PP_CUBIC_OFFSET_F3_1:
+	case R200_PP_CUBIC_OFFSET_F4_1:
+	case R200_PP_CUBIC_OFFSET_F5_1:
+	case R200_PP_CUBIC_OFFSET_F1_2:
+	case R200_PP_CUBIC_OFFSET_F2_2:
+	case R200_PP_CUBIC_OFFSET_F3_2:
+	case R200_PP_CUBIC_OFFSET_F4_2:
+	case R200_PP_CUBIC_OFFSET_F5_2:
+	case R200_PP_CUBIC_OFFSET_F1_3:
+	case R200_PP_CUBIC_OFFSET_F2_3:
+	case R200_PP_CUBIC_OFFSET_F3_3:
+	case R200_PP_CUBIC_OFFSET_F4_3:
+	case R200_PP_CUBIC_OFFSET_F5_3:
+	case R200_PP_CUBIC_OFFSET_F1_4:
+	case R200_PP_CUBIC_OFFSET_F2_4:
+	case R200_PP_CUBIC_OFFSET_F3_4:
+	case R200_PP_CUBIC_OFFSET_F4_4:
+	case R200_PP_CUBIC_OFFSET_F5_4:
+	case R200_PP_CUBIC_OFFSET_F1_5:
+	case R200_PP_CUBIC_OFFSET_F2_5:
+	case R200_PP_CUBIC_OFFSET_F3_5:
+	case R200_PP_CUBIC_OFFSET_F4_5:
+	case R200_PP_CUBIC_OFFSET_F5_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[i].cube_info[face - 1].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
+			DRM_ERROR("No support for depth xy offset in kms\n");
+			return -EINVAL;
+		}
+
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x210c:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case R200_SE_VTX_FMT_0:
+		track->vtx_size = r200_get_vtx_size_0(idx_value);
+		break;
+	case R200_SE_VTX_FMT_1:
+		track->vtx_size += r200_get_vtx_size_1(idx_value);
+		break;
+	case R200_PP_TXSIZE_0:
+	case R200_PP_TXSIZE_1:
+	case R200_PP_TXSIZE_2:
+	case R200_PP_TXSIZE_3:
+	case R200_PP_TXSIZE_4:
+	case R200_PP_TXSIZE_5:
+		i = (reg - R200_PP_TXSIZE_0) / 32;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXPITCH_0:
+	case R200_PP_TXPITCH_1:
+	case R200_PP_TXPITCH_2:
+	case R200_PP_TXPITCH_3:
+	case R200_PP_TXPITCH_4:
+	case R200_PP_TXPITCH_5:
+		i = (reg - R200_PP_TXPITCH_0) / 32;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFILTER_0:
+	case R200_PP_TXFILTER_1:
+	case R200_PP_TXFILTER_2:
+	case R200_PP_TXFILTER_3:
+	case R200_PP_TXFILTER_4:
+	case R200_PP_TXFILTER_5:
+		i = (reg - R200_PP_TXFILTER_0) / 32;
+		track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
+						 >> R200_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXMULTI_CTL_0:
+	case R200_PP_TXMULTI_CTL_1:
+	case R200_PP_TXMULTI_CTL_2:
+	case R200_PP_TXMULTI_CTL_3:
+	case R200_PP_TXMULTI_CTL_4:
+	case R200_PP_TXMULTI_CTL_5:
+		i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
+		break;
+	case R200_PP_TXFORMAT_X_0:
+	case R200_PP_TXFORMAT_X_1:
+	case R200_PP_TXFORMAT_X_2:
+	case R200_PP_TXFORMAT_X_3:
+	case R200_PP_TXFORMAT_X_4:
+	case R200_PP_TXFORMAT_X_5:
+		i = (reg - R200_PP_TXFORMAT_X_0) / 32;
+		track->textures[i].txdepth = idx_value & 0x7;
+		tmp = (idx_value >> 16) & 0x3;
+		/* 2D, 3D, CUBE */
+		switch (tmp) {
+		case 0:
+		case 3:
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+			/* 1D/2D */
+			track->textures[i].tex_coord_type = 0;
+			break;
+		case 1:
+			/* CUBE */
+			track->textures[i].tex_coord_type = 2;
+			break;
+		case 2:
+			/* 3D */
+			track->textures[i].tex_coord_type = 1;
+			break;
+		}
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFORMAT_0:
+	case R200_PP_TXFORMAT_1:
+	case R200_PP_TXFORMAT_2:
+	case R200_PP_TXFORMAT_3:
+	case R200_PP_TXFORMAT_4:
+	case R200_PP_TXFORMAT_5:
+		i = (reg - R200_PP_TXFORMAT_0) / 32;
+		if (idx_value & R200_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+			track->textures[i].lookup_disable = true;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case R200_TXFORMAT_I8:
+		case R200_TXFORMAT_RGB332:
+		case R200_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_AI88:
+		case R200_TXFORMAT_ARGB1555:
+		case R200_TXFORMAT_RGB565:
+		case R200_TXFORMAT_ARGB4444:
+		case R200_TXFORMAT_VYUY422:
+		case R200_TXFORMAT_YVYU422:
+		case R200_TXFORMAT_LDVDU655:
+		case R200_TXFORMAT_DVDU88:
+		case R200_TXFORMAT_AVYU4444:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_ARGB8888:
+		case R200_TXFORMAT_RGBA8888:
+		case R200_TXFORMAT_ABGR8888:
+		case R200_TXFORMAT_BGR111110:
+		case R200_TXFORMAT_LDVDU8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R200_TXFORMAT_DXT23:
+		case R200_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_FACES_0:
+	case R200_PP_CUBIC_FACES_1:
+	case R200_PP_CUBIC_FACES_2:
+	case R200_PP_CUBIC_FACES_3:
+	case R200_PP_CUBIC_FACES_4:
+	case R200_PP_CUBIC_FACES_5:
+		tmp = idx_value;
+		i = (reg - R200_PP_CUBIC_FACES_0) / 32;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
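+
+/* Index-recovery note for the switch above: per-unit R200 texture
+ * registers sit at fixed strides, so the unit number falls out of a
+ * division, e.g. i = (reg - R200_PP_TXOFFSET_0) / 24 for the 24-byte
+ * TXOFFSET array and i = (reg - R200_PP_TXSIZE_0) / 32 for the 32-byte
+ * state blocks; the cubic F1..F5 face offsets then lie 4 bytes apart
+ * within a unit's block.
+ */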
+
+void r200_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
+	rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
+}
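+
+/* How the bitmap registered above is presumably consumed (a hedged sketch
+ * of the r100-style CS checker, not a quote of it): one flag bit per
+ * 32-bit register, indexed as
+ *
+ *	r200_reg_safe_bm[reg >> 7] & (1 << ((reg >> 2) & 31))
+ *
+ * where a set bit routes the write to r200_packet0_check() above (which
+ * patches relocations or rejects the register) and a clear bit lets
+ * userspace write the register directly.
+ */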


Property changes on: trunk/sys/dev/drm2/radeon/r200.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r200_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r200_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r200_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,32 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r200_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned r200_reg_safe_bm[102] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFE7FE1F, 0xF003FFFF, 0x7EFFFFFF, 0xFFFF803C,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xFFFEFFFF, 0xFFFFFFFE,
+	0x020E0FF0, 0xFFCC83FD, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFBFFFF, 0xEFFCFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xDFDFDFDF, 0x3FFDDFDF, 0xFFFFFFFF, 0xFFFFFF7F,
+	0xFFFFFFFF, 0x00FFFFFF, 0x00000000, 0x00000000,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFE3F, 0xFFFFFFEF,
+};
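+
+/* Sizing note: 102 words x 32 bits = 3264 flag bits, one per 32-bit
+ * register, so the bitmap covers register offsets 0x0000-0x32FC. */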


Property changes on: trunk/sys/dev/drm2/radeon/r200_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r300.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r300.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r300.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1567 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r300.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "r100_track.h"
+#include "r300d.h"
+#include "rv350d.h"
+#include "r300_reg_safe.h"
+
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to
+ *   HOST_PATH_CNTL using MMIO to flush the host path read cache; this
+ *   leads to a HARDLOCKUP.  However, scheduling such a write on the ring
+ *   seems harmless; I suspect the CP read collides with the flush somehow,
+ *   or maybe the MC does, hard to tell. (Jerome Glisse)
+ */
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	int i;
+
+	/* Work around a HW bug: do the flush 2 times */
+	for (i = 0; i < 2; i++) {
+		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	}
+	mb();
+}
+
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	volatile uint32_t *ptr = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+	addr = (lower_32_bits(addr) >> 8) |
+	       ((upper_32_bits(addr) & 0xff) << 24) |
+	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
+	/* on x86 we want this to be CPU endian; on powerpc without HW
+	 * swappers, it'll get swapped on the way into VRAM - so there is
+	 * no need for cpu_to_le32 on VRAM tables */
+	ptr += i;
+	*ptr = (uint32_t)addr;
+	return 0;
+}
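+
+/* PTE packing example for the routine above: with addr = 0x123456000,
+ *	lower_32_bits(addr) >> 8           = 0x00234560
+ *	(upper_32_bits(addr) & 0xff) << 24 = 0x01000000
+ * so OR-ing in R300_PTE_WRITEABLE | R300_PTE_READABLE (0xc) stores
+ * 0x0123456C: a 40-bit, 256-byte-aligned bus address folded into a single
+ * 32-bit GART entry.
+ */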
+
+int rv370_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		DRM_ERROR("RV370 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	r = rv370_debugfs_pcie_gart_info_init(rdev);
+	if (r)
+		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t table_addr;
+	uint32_t tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* discard memory requests outside of the configured range */
+	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	table_addr = rdev->gart.table_addr;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+	/* FIXME: setup default page */
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+	/* Clear error */
+	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_EN;
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	rv370_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv370_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+void r300_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* Whoever calls radeon_fence_emit should call ring_lock and ask
+	 * for enough space (today the callers are ib scheduling and buffer
+	 * moves) */
+	/* Write SC register so SC & US assert idle */
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+	radeon_ring_write(ring, 0);
+	/* Flush 3D cache */
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
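+	/* Note: per the HOST_PATH_CNTL erratum at the top of this file, the
+	 * HDP read-buffer invalidate above is toggled through the ring
+	 * rather than written via MMIO. */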
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	unsigned gb_tile_config;
+	int r;
+
+	/* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch(rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	case 1:
+	default:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+	radeon_ring_write(ring, gb_tile_config);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X0_SHIFT) |
+			   (6 << R300_MS_Y0_SHIFT) |
+			   (6 << R300_MS_X1_SHIFT) |
+			   (6 << R300_MS_Y1_SHIFT) |
+			   (6 << R300_MS_X2_SHIFT) |
+			   (6 << R300_MS_Y2_SHIFT) |
+			   (6 << R300_MSBD0_Y_SHIFT) |
+			   (6 << R300_MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X3_SHIFT) |
+			   (6 << R300_MS_Y3_SHIFT) |
+			   (6 << R300_MS_X4_SHIFT) |
+			   (6 << R300_MS_Y4_SHIFT) |
+			   (6 << R300_MS_X5_SHIFT) |
+			   (6 << R300_MS_Y5_SHIFT) |
+			   (6 << R300_MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_GEOMETRY_ROUND_NEAREST |
+			  R300_COLOR_ROUND_NEAREST);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r300_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_R300 &&
+	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+	}
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r300_gpu_init(struct radeon_device *rdev)
+{
+	uint32_t gb_tile_config, tmp;
+
+	if ((rdev->family == CHIP_R300 && rdev->ddev->pci_device != 0x4144) ||
+	    (rdev->family == CHIP_R350 && rdev->ddev->pci_device != 0x4148)) {
+		/* r300,r350 */
+		rdev->num_gb_pipes = 2;
+	} else {
+		/* rv350,rv370,rv380,r300 AD, r350 AH */
+		rdev->num_gb_pipes = 1;
+	}
+	rdev->num_z_pipes = 1;
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch (rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	default:
+	case 1:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	if (r300_mc_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+int r300_asic_reset(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(device_get_parent(rdev->dev));
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* Resetting the CP seems to be problematic: sometimes it ends up
+	 * hard locking the computer, but it's necessary for a successful
+	 * reset.  More testing & playing is needed on R3XX/R4XX to find a
+	 * reliable solution (if any exists).
+	 */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(device_get_parent(rdev->dev));
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+	u32 tmp;
+
+	/* DDR for all cards after R300 & IGP */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RADEON_MEM_CNTL);
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default:  rdev->mc.vram_width = 128; break;
+	}
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	uint32_t link_width_cntl, mask;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     RADEON_PCIE_LC_RECONFIG_LATER |
+			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+	link_width_cntl |= mask;
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						     RADEON_PCIE_LC_RECONFIG_NOW));
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
+
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
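+
+/* Usage sketch for the two helpers above (illustrative): a power-saving
+ * path could narrow the link and restore it later, e.g.
+ *
+ *	lanes = rv370_get_pcie_lanes(rdev);
+ *	rv370_set_pcie_lanes(rdev, 1);
+ *	...
+ *	rv370_set_pcie_lanes(rdev, lanes);
+ *
+ * both helpers bail out early on IGP and non-PCIE parts, so callers need
+ * not gate on the bus type themselves.
+ */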
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static int r300_packet0_check(struct radeon_cs_parser *p,
+		struct radeon_cs_packet *pkt,
+		unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp, tile_flags = 0;
+	unsigned i;
+	int r;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch(reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_RB3D_COLOROFFSET1:
+	case R300_RB3D_COLOROFFSET2:
+	case R300_RB3D_COLOROFFSET3:
+		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[i].robj = reloc->robj;
+		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_ZB_DEPTHOFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+	case R300_TX_OFFSET_0+8:
+	case R300_TX_OFFSET_0+12:
+	case R300_TX_OFFSET_0+16:
+	case R300_TX_OFFSET_0+20:
+	case R300_TX_OFFSET_0+24:
+	case R300_TX_OFFSET_0+28:
+	case R300_TX_OFFSET_0+32:
+	case R300_TX_OFFSET_0+36:
+	case R300_TX_OFFSET_0+40:
+	case R300_TX_OFFSET_0+44:
+	case R300_TX_OFFSET_0+48:
+	case R300_TX_OFFSET_0+52:
+	case R300_TX_OFFSET_0+56:
+	case R300_TX_OFFSET_0+60:
+		i = (reg - R300_TX_OFFSET_0) >> 2;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
+			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+				  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+		} else {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_TXO_MICRO_TILE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+			tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	/* Tracked registers */
+	case 0x2084:
+		/* VAP_VF_CNTL */
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x20B4:
+		/* VAP_VTX_SIZE */
+		track->vtx_size = idx_value & 0x7F;
+		break;
+	case 0x2134:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case 0x2088:
+		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
+		if (p->rdev->family < CHIP_RV515)
+			goto fail;
+		track->vap_alt_nverts = idx_value & 0xFFFFFF;
+		break;
+	case 0x43E4:
+		/* SC_SCISSOR1 */
+		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
+		if (p->rdev->family < CHIP_RV515) {
+			track->maxy -= 1440;
+		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case 0x4E00:
+		/* RB3D_CCTL */
+		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+		    p->rdev->cmask_filp != p->filp) {
+			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+			return -EINVAL;
+		}
+		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
+		break;
+	case 0x4E38:
+	case 0x4E3C:
+	case 0x4E40:
+	case 0x4E44:
+		/* RB3D_COLORPITCH0 */
+		/* RB3D_COLORPITCH1 */
+		/* RB3D_COLORPITCH2 */
+		/* RB3D_COLORPITCH3 */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		i = (reg - 0x4E38) >> 2;
+		track->cb[i].pitch = idx_value & 0x3FFE;
+		switch (((idx_value >> 21) & 0xF)) {
+		case 9:
+		case 11:
+		case 12:
+			track->cb[i].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 13:
+		case 15:
+			track->cb[i].cpp = 2;
+			break;
+		case 5:
+			if (p->rdev->family < CHIP_RV515) {
+				DRM_ERROR("Invalid color buffer format (%d)!\n",
+					  ((idx_value >> 21) & 0xF));
+				return -EINVAL;
+			}
+			/* Pass through. */
+		case 6:
+			track->cb[i].cpp = 4;
+			break;
+		case 10:
+			track->cb[i].cpp = 8;
+			break;
+		case 7:
+			track->cb[i].cpp = 16;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> 21) & 0xF));
+			return -EINVAL;
+		}
+		track->cb_dirty = true;
+		break;
+	case 0x4F00:
+		/* ZB_CNTL */
+		if (idx_value & 2) {
+			track->z_enabled = true;
+		} else {
+			track->z_enabled = false;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F10:
+		/* ZB_FORMAT */
+		switch ((idx_value & 0xF)) {
+		case 0:
+		case 1:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+			track->zb.cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid z buffer format (%d) !\n",
+				  (idx_value & 0xF));
+			return -EINVAL;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F24:
+		/* ZB_DEPTHPITCH */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_DEPTHMICROTILE_TILED;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
+		break;
+	case 0x4104:
+		/* TX_ENABLE */
+		for (i = 0; i < 16; i++) {
+			bool enabled;
+
+			enabled = !!(idx_value & (1 << i));
+			track->textures[i].enabled = enabled;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x44C0:
+	case 0x44C4:
+	case 0x44C8:
+	case 0x44CC:
+	case 0x44D0:
+	case 0x44D4:
+	case 0x44D8:
+	case 0x44DC:
+	case 0x44E0:
+	case 0x44E4:
+	case 0x44E8:
+	case 0x44EC:
+	case 0x44F0:
+	case 0x44F4:
+	case 0x44F8:
+	case 0x44FC:
+		/* TX_FORMAT1_[0-15] */
+		i = (reg - 0x44C0) >> 2;
+		tmp = (idx_value >> 25) & 0x3;
+		track->textures[i].tex_coord_type = tmp;
+		switch ((idx_value & 0x1F)) {
+		case R300_TX_FORMAT_X8:
+		case R300_TX_FORMAT_Y4X4:
+		case R300_TX_FORMAT_Z3Y3X2:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
+		case R300_TX_FORMAT_Y8X8:
+		case R300_TX_FORMAT_Z5Y6X5:
+		case R300_TX_FORMAT_Z6Y5X5:
+		case R300_TX_FORMAT_W4Z4Y4X4:
+		case R300_TX_FORMAT_W1Z5Y5X5:
+		case R300_TX_FORMAT_D3DMFT_CxV8U8:
+		case R300_TX_FORMAT_B8G8_B8G8:
+		case R300_TX_FORMAT_G8R8_G8B8:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
+		case R300_TX_FORMAT_Z11Y11X10:
+		case R300_TX_FORMAT_Z10Y11X11:
+		case R300_TX_FORMAT_W8Z8Y8X8:
+		case R300_TX_FORMAT_W2Z10Y10X10:
+		case 0x17:
+		case R300_TX_FORMAT_FL_I32:
+		case 0x1e:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_W16Z16Y16X16:
+		case R300_TX_FORMAT_FL_R16G16B16A16:
+		case R300_TX_FORMAT_FL_I32A32:
+			track->textures[i].cpp = 8;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_FL_R32G32B32A32:
+			track->textures[i].cpp = 16;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R300_TX_FORMAT_ATI2N:
+			if (p->rdev->family < CHIP_R420) {
+				DRM_ERROR("Invalid texture format %u\n",
+					  (idx_value & 0x1F));
+				return -EINVAL;
+			}
+			/* The same rules apply as for DXT3/5. */
+			/* Pass through. */
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		default:
+			DRM_ERROR("Invalid texture format %u\n",
+				  (idx_value & 0x1F));
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4400:
+	case 0x4404:
+	case 0x4408:
+	case 0x440C:
+	case 0x4410:
+	case 0x4414:
+	case 0x4418:
+	case 0x441C:
+	case 0x4420:
+	case 0x4424:
+	case 0x4428:
+	case 0x442C:
+	case 0x4430:
+	case 0x4434:
+	case 0x4438:
+	case 0x443C:
+		/* TX_FILTER0_[0-15] */
+		i = (reg - 0x4400) >> 2;
+		tmp = idx_value & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_w = false;
+		}
+		tmp = (idx_value >> 3) & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_h = false;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4500:
+	case 0x4504:
+	case 0x4508:
+	case 0x450C:
+	case 0x4510:
+	case 0x4514:
+	case 0x4518:
+	case 0x451C:
+	case 0x4520:
+	case 0x4524:
+	case 0x4528:
+	case 0x452C:
+	case 0x4530:
+	case 0x4534:
+	case 0x4538:
+	case 0x453C:
+		/* TX_FORMAT2_[0-15] */
+		i = (reg - 0x4500) >> 2;
+		tmp = idx_value & 0x3FFF;
+		track->textures[i].pitch = tmp + 1;
+		if (p->rdev->family >= CHIP_RV515) {
+			tmp = ((idx_value >> 15) & 1) << 11;
+			track->textures[i].width_11 = tmp;
+			tmp = ((idx_value >> 16) & 1) << 11;
+			track->textures[i].height_11 = tmp;
+
+			/* ATI1N */
+			if (idx_value & (1 << 14)) {
+				/* The same rules apply as for DXT1. */
+				track->textures[i].compress_format =
+					R100_TRACK_COMP_DXT1;
+			}
+		} else if (idx_value & (1 << 14)) {
+			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4480:
+	case 0x4484:
+	case 0x4488:
+	case 0x448C:
+	case 0x4490:
+	case 0x4494:
+	case 0x4498:
+	case 0x449C:
+	case 0x44A0:
+	case 0x44A4:
+	case 0x44A8:
+	case 0x44AC:
+	case 0x44B0:
+	case 0x44B4:
+	case 0x44B8:
+	case 0x44BC:
+		/* TX_FORMAT0_[0-15] */
+		i = (reg - 0x4480) >> 2;
+		tmp = idx_value & 0x7FF;
+		track->textures[i].width = tmp + 1;
+		tmp = (idx_value >> 11) & 0x7FF;
+		track->textures[i].height = tmp + 1;
+		tmp = (idx_value >> 26) & 0xF;
+		track->textures[i].num_levels = tmp;
+		tmp = idx_value & (1U << 31);
+		track->textures[i].use_pitch = !!tmp;
+		tmp = (idx_value >> 22) & 0xF;
+		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
+		break;
+	case R300_ZB_ZPASS_ADDR:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case 0x4e0c:
+		/* RB3D_COLOR_CHANNEL_MASK */
+		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
+		break;
+	case 0x43a4:
+		/* SC_HYPERZ_EN */
+		/* r300c emits this register - we need to disable hyperz for it
+		 * without complaining */
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & 0x1)
+				ib[idx] = idx_value & ~1;
+		}
+		break;
+	case 0x4f1c:
+		/* ZB_BW_CNTL */
+		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & (R300_HIZ_ENABLE |
+					 R300_RD_COMP_ENABLE |
+					 R300_WR_COMP_ENABLE |
+					 R300_FAST_FILL_ENABLE))
+				goto fail;
+		}
+		break;
+	case 0x4e04:
+		/* RB3D_BLENDCNTL */
+		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
+		break;
+	case 0x4f30: /* ZB_MASK_OFFSET */
+	case 0x4f34: /* ZB_ZMASK_PITCH */
+	case 0x4f44: /* ZB_HIZ_OFFSET */
+	case 0x4f54: /* ZB_HIZ_PITCH */
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		break;
+	case 0x4028:
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		/* GB_Z_PEQ_CONFIG */
+		if (p->rdev->family >= CHIP_RV350)
+			break;
+		goto fail;
+		break;
+	case 0x4be8:
+		/* valid register only on RV530 */
+		if (p->rdev->family == CHIP_RV530)
+			break;
+		/* fallthrough; do not move */
+	default:
+		goto fail;
+	}
+	return 0;
+fail:
+	DRM_ERROR("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+	       reg, idx, idx_value);
+	return -EINVAL;
+}
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	unsigned idx;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch(pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	/* Draw packet */
+	case PACKET3_3D_DRAW_IMMD:
+		/* Number of dwords is vtx_size * (num_vertices - 1);
+		 * PRIM_WALK must be equal to 3, i.e. vertex data is embedded
+		 * in the cmd stream */
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_IMMD_2:
+		/* Number of dwords is vtx_size * (num_vertices - 1);
+		 * PRIM_WALK must be equal to 3, i.e. vertex data is embedded
+		 * in the cmd stream */
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_3D_CLEAR_CMASK:
+		if (p->rdev->cmask_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (track == NULL)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = r100_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			r = r100_cs_parse_packet0(p, &pkt,
+						  p->rdev->config.r300.reg_safe_bm,
+						  p->rdev->config.r300.reg_safe_bm_size,
+						  &r300_packet0_check);
+			break;
+		case PACKET_TYPE2:
+			break;
+		case PACKET_TYPE3:
+			r = r300_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	free(p->track, DRM_MEM_DRIVER);
+	p->track = NULL;
+	return 0;
+}
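+
+/* Illustrative note on the parse loop above: the count field of a CP packet
+ * encodes the number of payload dwords minus one (compare the
+ * CP_PACKET0(reg, sz - 1) emissions elsewhere in this commit), so one header
+ * dword plus the payload advances the stream by pkt.count + 2.
+ */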
+
+void r300_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
+}
+
+void r300_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+	}
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32(R_00015C_AGP_BASE_2,
+			upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r300_mc_wait_for_idle(rdev))
+		DRM_INFO("Failed to wait for MC idle before programming MC.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
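+
+/* Worked example for the MC programming above (a guess at the units): the
+ * ">> 16" shifts suggest the 16-bit FB START/TOP fields are expressed in
+ * 64KB granularity, so vram_start = 0 and vram_end = 0x07FFFFFF would be
+ * written as S_000148_MC_FB_START(0x0000) | S_000148_MC_FB_TOP(0x07FF).
+ */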
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force on some of the block */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+		tmp |= S_00000D_FORCE_VAP(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r300_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350 ||
+	    rdev->family == CHIP_RV350)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure GART is not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disabling VGA needs to use a VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Set asic errata */
+	r300_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r300_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r300.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r300_cmdbuf.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r300_cmdbuf.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r300_cmdbuf.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1186 @@
+/* $MidnightBSD$ */
+/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc.  2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Nicolai Haehnle <prefect_ at gmx.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r300_cmdbuf.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_buffer.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define R300_SIMULTANEOUS_CLIPRECTS		4
+
+/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+	0xAAAA,
+	0xEEEE,
+	0xFEFE,
+	0xFFFE
+};
+
+/**
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
+{
+	struct drm_clip_rect box;
+	int nr;
+	int i;
+	RING_LOCALS;
+
+	nr = cmdbuf->nbox - n;
+	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+		nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+	DRM_DEBUG("%i cliprects\n", nr);
+
+	if (nr) {
+		BEGIN_RING(6 + nr * 2);
+		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+		for (i = 0; i < nr; ++i) {
+			if (DRM_COPY_FROM_USER_UNCHECKED
+			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+				DRM_ERROR("copy cliprect faulted\n");
+				return -EFAULT;
+			}
+
+			box.x2--; /* Hardware expects inclusive bottom-right corner */
+			box.y2--;
+
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+				box.x1 = (box.x1) &
+					R300_CLIPRECT_MASK;
+				box.y1 = (box.y1) &
+					R300_CLIPRECT_MASK;
+				box.x2 = (box.x2) &
+					R300_CLIPRECT_MASK;
+				box.y2 = (box.y2) &
+					R300_CLIPRECT_MASK;
+			} else {
+				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+			}
+
+			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
+			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y2 << R300_CLIPRECT_Y_SHIFT));
+
+		}
+
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
+
+		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
+		 * client might be able to trample over memory.
+		 * The impact should be very limited, but I'd rather be safe than
+		 * sorry.
+		 */
+		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+		OUT_RING(0);
+		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
+		ADVANCE_RING();
+	} else {
+		/* Why we allow zero cliprect rendering:
+		 * There are some commands in a command buffer that must be submitted
+		 * even when there are no cliprects, e.g. DMA buffer discard
+		 * or state setting (though state setting could be avoided by
+		 * simulating a loss of context).
+		 *
+		 * Now since the cmdbuf interface is so chaotic right now (and is
+		 * bound to remain that way for a bit until things settle down),
+		 * it is basically impossible to filter out the commands that are
+		 * necessary and those that aren't.
+		 *
+		 * So I choose the safe way and don't do any filtering at all;
+		 * instead, I simply set up the engine so that all rendering
+		 * can't produce any fragments.
+		 */
+		BEGIN_RING(2);
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
+		ADVANCE_RING();
+	}
+
+	/* flush cache and wait idle clean after cliprect change */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	return 0;
+}
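+
+/* Worked example for the cliprect path above (illustrative): a userspace box
+ * of (x1=0, y1=0, x2=640, y2=480) has an exclusive bottom-right corner, so
+ * after the decrements the hardware sees (0, 0, 639, 479).  On chips below
+ * RV515 each coordinate is additionally biased by R300_CLIPRECT_OFFSET
+ * before being masked into the register fields.
+ */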
+
+static u8 r300_reg_flags[0x10000 >> 2];
+
+void r300_init_reg_flags(struct drm_device *dev)
+{
+	int i;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	memset(r300_reg_flags, 0, 0x10000 >> 2);
+#define ADD_RANGE_MARK(reg, count,mark) \
+		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
+			r300_reg_flags[i]|=(mark);
+
+#define MARK_SAFE		1
+#define MARK_CHECK_OFFSET	2
+
+#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)
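+
+/* For illustration, ADD_RANGE(R300_SE_VPORT_XSCALE, 6) below expands to a
+ * loop that ORs MARK_SAFE into r300_reg_flags[0x1D98 >> 2] through
+ * r300_reg_flags[(0x1D98 >> 2) + 5], whitelisting the six viewport
+ * transform registers for direct packet0 emission.
+ */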
+
+	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+	ADD_RANGE(R300_VAP_CNTL, 1);
+	ADD_RANGE(R300_SE_VTE_CNTL, 2);
+	ADD_RANGE(0x2134, 2);
+	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
+	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+	ADD_RANGE(0x21DC, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
+	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+	ADD_RANGE(R300_GB_ENABLE, 1);
+	ADD_RANGE(R300_GB_MSPOS0, 5);
+	ADD_RANGE(R300_TX_INVALTAGS, 1);
+	ADD_RANGE(R300_TX_ENABLE, 1);
+	ADD_RANGE(0x4200, 4);
+	ADD_RANGE(0x4214, 1);
+	ADD_RANGE(R300_RE_POINTSIZE, 1);
+	ADD_RANGE(0x4230, 3);
+	ADD_RANGE(R300_RE_LINE_CNT, 1);
+	ADD_RANGE(R300_RE_UNK4238, 1);
+	ADD_RANGE(0x4260, 3);
+	ADD_RANGE(R300_RE_SHADE, 4);
+	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
+	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
+	ADD_RANGE(R300_RE_CULL_CNTL, 1);
+	ADD_RANGE(0x42C0, 2);
+	ADD_RANGE(R300_RS_CNTL_0, 2);
+
+	ADD_RANGE(R300_SU_REG_DEST, 1);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
+		ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
+
+	ADD_RANGE(R300_SC_HYPERZ, 2);
+	ADD_RANGE(0x43E8, 1);
+
+	ADD_RANGE(0x46A4, 5);
+
+	ADD_RANGE(R300_RE_FOG_STATE, 1);
+	ADD_RANGE(R300_FOG_COLOR_R, 3);
+	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+	ADD_RANGE(0x4BD8, 1);
+	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+	ADD_RANGE(0x4E00, 1);
+	ADD_RANGE(R300_RB3D_CBLEND, 2);
+	ADD_RANGE(R300_RB3D_COLORMASK, 1);
+	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
+	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+	ADD_RANGE(0x4E50, 9);
+	ADD_RANGE(0x4E88, 1);
+	ADD_RANGE(0x4EA0, 2);
+	ADD_RANGE(R300_ZB_CNTL, 3);
+	ADD_RANGE(R300_ZB_FORMAT, 4);
+	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
+	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
+	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
+	ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
+
+	ADD_RANGE(R300_TX_FILTER_0, 16);
+	ADD_RANGE(R300_TX_FILTER1_0, 16);
+	ADD_RANGE(R300_TX_SIZE_0, 16);
+	ADD_RANGE(R300_TX_FORMAT_0, 16);
+	ADD_RANGE(R300_TX_PITCH_0, 16);
+	/* Texture offset is dangerous and needs more checking */
+	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
+	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+	/* Registers used sporadically as primitives are emitted */
+	/* Sporadic registers used as primitives are emitted */
+	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
+		ADD_RANGE(R500_US_CONFIG, 2);
+		ADD_RANGE(R500_US_CODE_ADDR, 3);
+		ADD_RANGE(R500_US_FC_CTRL, 1);
+		ADD_RANGE(R500_RS_IP_0, 16);
+		ADD_RANGE(R500_RS_INST_0, 16);
+		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
+		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
+		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+	} else {
+		ADD_RANGE(R300_PFS_CNTL_0, 3);
+		ADD_RANGE(R300_PFS_NODE_0, 4);
+		ADD_RANGE(R300_PFS_TEXI_0, 64);
+		ADD_RANGE(R300_PFS_INSTR0_0, 64);
+		ADD_RANGE(R300_PFS_INSTR1_0, 64);
+		ADD_RANGE(R300_PFS_INSTR2_0, 64);
+		ADD_RANGE(R300_PFS_INSTR3_0, 64);
+		ADD_RANGE(R300_RS_INTERP_0, 8);
+		ADD_RANGE(R300_RS_ROUTE_0, 8);
+
+	}
+}
+
+static __inline__ int r300_check_range(unsigned reg, int count)
+{
+	int i;
+	if (reg & ~0xffff)
+		return -1;
+	for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+		if (r300_reg_flags[i] != MARK_SAFE)
+			return 1;
+	return 0;
+}
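+
+/* Return values above: -1 if the range falls outside the 16-bit register
+ * space, 1 if any register in the span is not plain MARK_SAFE, and 0 if the
+ * whole span may be emitted without further checks; any nonzero result sends
+ * the caller down the carefully-checked path.
+ */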
+
+static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
+							  dev_priv,
+							  drm_radeon_kcmd_buffer_t
+							  * cmdbuf,
+							  drm_r300_cmd_header_t
+							  header)
+{
+	int reg;
+	int sz;
+	int i;
+	u32 *value;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if ((sz > 64) || (sz < 0)) {
+		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+			 reg, sz);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < sz; i++) {
+		switch (r300_reg_flags[(reg >> 2) + i]) {
+		case MARK_SAFE:
+			break;
+		case MARK_CHECK_OFFSET:
+			value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+			if (!radeon_check_offset(dev_priv, *value)) {
+				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+					 reg, sz);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Register %04x failed check as flag=%02x\n",
+				reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+			return -EINVAL;
+		}
+	}
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if (!sz)
+		return 0;
+
+	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	if (reg + sz * 4 >= 0x10000) {
+		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
+			  sz);
+		return -EINVAL;
+	}
+
+	if (r300_check_range(reg, sz)) {
+		/* go and check everything */
+		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
+							   header);
+	}
+	/* the rest of the data is safe to emit, whatever the values the user passed */
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+				    drm_radeon_kcmd_buffer_t *cmdbuf,
+				    drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	RING_LOCALS;
+
+	sz = header.vpu.count;
+	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+	if (!sz)
+		return 0;
+	if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	/* VAP is very sensitive so we purge cache before we program it
+	 * and we also flush its state before & after */
+	BEGIN_RING(6);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	BEGIN_RING(3 + sz * 4);
+	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
+	ADVANCE_RING();
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+				      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	RING_LOCALS;
+
+	if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(10);
+	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
+	ADVANCE_RING();
+
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+					       drm_radeon_kcmd_buffer_t *cmdbuf,
+					       u32 header)
+{
+	int count, i, k;
+#define MAX_ARRAY_PACKET  64
+	u32 *data;
+	u32 narrays;
+	RING_LOCALS;
+
+	count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	if ((count + 1) > MAX_ARRAY_PACKET) {
+		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+			  count);
+		return -EINVAL;
+	}
+	/* carefully check packet contents */
+
+	/* We have already read the header so advance the buffer. */
+	drm_buffer_advance(cmdbuf->buffer, 4);
+
+	narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	k = 0;
+	i = 1;
+	while ((k < narrays) && (i < (count + 1))) {
+		i++;		/* skip attribute field */
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+		if (k == narrays)
+			break;
+		/* have one more to process, they come in pairs */
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+	}
+	/* do the counts match what we expect? */
+	if ((k != narrays) || (i != (count + 1))) {
+		DRM_ERROR
+		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+		     k, i, narrays, count + 1);
+		return -EINVAL;
+	}
+
+	/* all clear, output packet */
+
+	BEGIN_RING(count + 2);
+	OUT_RING(header);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
+	ADVANCE_RING();
+
+	return 0;
+}
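+
+/* Illustrative payload layout for 3D_LOAD_VBPNTR as walked above: dword 0
+ * holds the array count (narrays), and each following group is one
+ * attribute-description dword plus one or two buffer offsets -- arrays are
+ * processed in pairs, so an odd narrays leaves a final group with a single
+ * offset.
+ */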
+
+static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+					     drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	int count, ret;
+	RING_LOCALS;
+
+
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	if (*cmd & 0x8000) {
+		u32 offset;
+		u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+
+			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+			offset = *cmd2 << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
+				return -EINVAL;
+			}
+		}
+
+		if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
+				return -EINVAL;
+			}
+
+		}
+	}
+
+	BEGIN_RING(count+2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
+					    drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+	int count;
+	int expected_count;
+	RING_LOCALS;
+
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	expected_count = *cmd1 >> 16;
+	if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+		expected_count = (expected_count+1)/2;
+
+	if (count && count != expected_count) {
+		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+			count, expected_count);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count+2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	if (!count) {
+		drm_r300_cmd_header_t stack_header, *header;
+		u32 *cmd1, *cmd2, *cmd3;
+
+		if (drm_buffer_unprocessed(cmdbuf->buffer)
+				< 4*4 + sizeof(stack_header)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+			return -EINVAL;
+		}
+
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+		cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+
+		if (header->header.cmd_type != R300_CMD_PACKET3 ||
+		    header->packet3.packet != R300_CMD_PACKET3_RAW ||
+		    *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+			return -EINVAL;
+		}
+
+		if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+					*cmd1);
+			return -EINVAL;
+		}
+		if (!radeon_check_offset(dev_priv, *cmd2)) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+					*cmd2);
+			return -EINVAL;
+		}
+		if (*cmd3 != expected_count) {
+			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+				*cmd3, expected_count);
+			return -EINVAL;
+		}
+
+		BEGIN_RING(4);
+		OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
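+
+/* Worked example for the size check above (illustrative): 16-bit indices
+ * pack two per dword, so a draw of 5 indices has
+ * expected_count = (5 + 1) / 2 = 3 payload dwords, while with
+ * R300_VAP_VF_CNTL__INDEX_SIZE_32bit set it would be 5.
+ */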
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+					    drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *header;
+	int count;
+	RING_LOCALS;
+
+	if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	/* FIXME: This simply emits a packet without much checking.
+	   We need to be smarter. */
+
+	/* obtain first word - actual packet3 header */
+	header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+
+	/* Is it packet 3? */
+	if ((*header >> 30) != 0x3) {
+		DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
+		return -EINVAL;
+	}
+
+	count = (*header >> 16) & 0x3fff;
+
+	/* Check again now that we know how much data to expect */
+	if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR
+		    ("Expected packet3 of length %d but have only %d bytes left\n",
+		     (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
+		return -EINVAL;
+	}
+
+	/* Is it a packet type we know about? */
+	switch (*header & 0xff00) {
+	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
+		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
+
+	case RADEON_CNTL_BITBLT_MULTI:
+		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
+
+	case RADEON_CP_INDX_BUFFER:
+		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+		return -EINVAL;
+	case RADEON_CP_3D_DRAW_IMMD_2:
+		/* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2:
+		/* triggers drawing of vertex buffers setup elsewhere */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		break;
+	case RADEON_CP_3D_DRAW_INDX_2:
+		/* triggers drawing using indices to vertex buffer */
+		/* whenever we send vertex we clear flush & purge */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+		/* these packets are safe */
+		break;
+	default:
+		DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count + 2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int n;
+	int ret;
+	int orig_iter = cmdbuf->buffer->iterator;
+
+	/* This is a do-while-loop so that we run the interior at least once,
+	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
+	 */
+	n = 0;
+	do {
+		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+			if (ret)
+				return ret;
+
+			cmdbuf->buffer->iterator = orig_iter;
+		}
+
+		switch (header.packet3.packet) {
+		case R300_CMD_PACKET3_CLEAR:
+			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+			ret = r300_emit_clear(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_clear failed\n");
+				return ret;
+			}
+			break;
+
+		case R300_CMD_PACKET3_RAW:
+			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_raw_packet3 failed\n");
+				return ret;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad packet3 type %i at byte %d\n",
+				  header.packet3.packet,
+				  cmdbuf->buffer->iterator - (int)sizeof(header));
+			return -EINVAL;
+		}
+
+		n += R300_SIMULTANEOUS_CLIPRECTS;
+	} while (n < cmdbuf->nbox);
+
+	return 0;
+}
+
+/* Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/**
+ * Emit the sequence to pacify R300.
+ */
+static void r300_pacify(drm_radeon_private_t *dev_priv)
+{
+	uint32_t cache_z, cache_3d, cache_2d;
+	RING_LOCALS;
+
+	cache_z = R300_ZC_FLUSH;
+	cache_2d = R300_RB2D_DC_FLUSH;
+	cache_3d = R300_RB3D_DC_FLUSH;
+	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge; primitives were drawn since the last purge */
+		cache_z |= R300_ZC_FREE;
+		cache_2d |= R300_RB2D_DC_FREE;
+		cache_3d |= R300_RB3D_DC_FREE;
+	}
+
+	/* flush & purge zbuffer */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
+	OUT_RING(cache_z);
+	ADVANCE_RING();
+	/* flush & purge 3d */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_3d);
+	ADVANCE_RING();
+	/* flush & purge texture */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* FIXME: is this one really needed? */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* flush & purge 2d through E2 as RB2D will trigger lockup */
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_2d);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+		 RADEON_WAIT_HOST_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush & purge flags */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/**
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch(header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
+
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+			drm_radeon_kcmd_buffer_t *cmdbuf,
+			drm_r300_cmd_header_t header)
+{
+	u32 *ref_age_base;
+	u32 i, *buf_idx, h_pending;
+	u64 *ptr_addr;
+	u64 stack_ptr_addr;
+	RING_LOCALS;
+
+	if (drm_buffer_unprocessed(cmdbuf->buffer) <
+	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
+		return -EINVAL;
+	}
+
+	if (header.scratch.reg >= 5) {
+		return -EINVAL;
+	}
+
+	dev_priv->scratch_ages[header.scratch.reg]++;
+
+	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+			sizeof(stack_ptr_addr), &stack_ptr_addr);
+	ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
+
+	for (i=0; i < header.scratch.n_bufs; i++) {
+		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		*buf_idx *= 2; /* 8 bytes per buf */
+
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+				&dev_priv->scratch_ages[header.scratch.reg],
+				sizeof(u32)))
+			return -EINVAL;
+
+		if (DRM_COPY_FROM_USER(&h_pending,
+				ref_age_base + *buf_idx + 1,
+				sizeof(u32)))
+			return -EINVAL;
+
+		if (h_pending == 0)
+			return -EINVAL;
+
+		h_pending--;
+
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+					&h_pending,
+					sizeof(u32)))
+			return -EINVAL;
+
+		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
+	}
+
+	BEGIN_RING(2);
+	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
+	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
+	ADVANCE_RING();
+
+	return 0;
+}
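+
+/* Illustrative layout assumed by r300_scratch above: each entry of the
+ * user-supplied table at ref_age_base is 8 bytes -- a u32 age slot the
+ * kernel writes and a u32 pending count that is read, decremented and
+ * written back -- which is why each buf_idx is doubled before being used
+ * as a u32 index.
+ */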
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+				       drm_radeon_kcmd_buffer_t *cmdbuf,
+				       drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	int type;
+	int isclamp;
+	int stride;
+	RING_LOCALS;
+
+	sz = header.r500fp.count;
+	/* address is 9 bits (0 - 8); the low bit of adrhi_flags supplies address bit 8 */
+	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+
+	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+	isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+
+	addr |= (type << 16);
+	addr |= (isclamp << 17);
+
+	stride = type ? 4 : 6;
+
+	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
+	if (!sz)
+		return 0;
+	if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(3 + sz * stride);
+	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
+	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
+
+	ADVANCE_RING();
+
+	return 0;
+}
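+
+/* Worked example for the address packing above (illustrative): with
+ * adrlo = 0x10, the low flag bit clear and the TYPE flag set, the upload
+ * address becomes 0x10 | (1 << 16) = 0x10010 and the per-constant stride
+ * is 4 dwords; without the TYPE flag the stride is 6.
+ */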
+
+
+/**
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+int r300_do_cp_cmdbuf(struct drm_device *dev,
+		      struct drm_file *file_priv,
+		      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	int emit_dispatch_age = 0;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	/* pacify */
+	r300_pacify(dev_priv);
+
+	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+		if (ret)
+			goto cleanup;
+	}
+
+	while (drm_buffer_unprocessed(cmdbuf->buffer)
+			>= sizeof(drm_r300_cmd_header_t)) {
+		int idx;
+		drm_r300_cmd_header_t *header, stack_header;
+
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		switch (header->header.cmd_type) {
+		case R300_CMD_PACKET0:
+			DRM_DEBUG("R300_CMD_PACKET0\n");
+			ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet0 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_VPU:
+			DRM_DEBUG("R300_CMD_VPU\n");
+			ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_vpu failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_PACKET3:
+			DRM_DEBUG("R300_CMD_PACKET3\n");
+			ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet3 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_END3D:
+			DRM_DEBUG("R300_CMD_END3D\n");
+			/* TODO:
+			   Ideally the userspace driver should not need to issue this call,
+			   i.e. the drm driver should issue it automatically and prevent
+			   lockups.
+
+			   In practice, we do not understand why this call is needed and what
+			   it does (beyond some vague guesses that it has to do with cache
+			   coherence), so the userspace driver issues it.
+
+			   Once we know exactly which usages prevent lockups, the code could
+			   be moved into the kernel and the userspace driver would no longer
+			   need to use this command.
+
+			   Note that issuing this command does not hurt anything
+			   except, possibly, performance */
+			r300_pacify(dev_priv);
+			break;
+
+		case R300_CMD_CP_DELAY:
+			/* simple enough, we can do it here */
+			DRM_DEBUG("R300_CMD_CP_DELAY\n");
+			{
+				int i;
+				RING_LOCALS;
+
+				BEGIN_RING(header->delay.count);
+				for (i = 0; i < header->delay.count; i++)
+					OUT_RING(RADEON_CP_PACKET2);
+				ADVANCE_RING();
+			}
+			break;
+
+		case R300_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header->dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			emit_dispatch_age = 1;
+			r300_discard_buffer(dev, file_priv->master, buf);
+			break;
+
+		case R300_CMD_WAIT:
+			DRM_DEBUG("R300_CMD_WAIT\n");
+			r300_cmd_wait(dev_priv, *header);
+			break;
+
+		case R300_CMD_SCRATCH:
+			DRM_DEBUG("R300_CMD_SCRATCH\n");
+			ret = r300_scratch(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_scratch failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_R500FP:
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+				DRM_ERROR("Calling r500 command on r300 card\n");
+				ret = -EINVAL;
+				goto cleanup;
+			}
+			DRM_DEBUG("R300_CMD_R500FP\n");
+			ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_r500fp failed\n");
+				goto cleanup;
+			}
+			break;
+		default:
+			DRM_ERROR("bad cmd_type %i at byte %d\n",
+				  header->header.cmd_type,
+				  cmdbuf->buffer->iterator - (int)sizeof(*header));
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
+	DRM_DEBUG("END\n");
+
+      cleanup:
+	r300_pacify(dev_priv);
+
+	/* We emit the vertex buffer age here, outside the pacifier "brackets"
+	 * for two reasons:
+	 *  (1) This may coalesce multiple age emissions into a single one and
+	 *  (2) more importantly, some chips lock up hard when scratch registers
+	 *      are written inside the pacifier bracket.
+	 */
+	if (emit_dispatch_age) {
+		RING_LOCALS;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
+		ADVANCE_RING();
+	}
+
+	COMMIT_RING();
+
+	return ret;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r300_cmdbuf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r300_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r300_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r300_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1793 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ *          Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r300_reg.h 261455 2014-02-04 03:36:42Z eadler $");
+
+#define R300_SURF_TILE_MACRO (1<<16)
+#define R300_SURF_TILE_MICRO (2<<16)
+#define R300_SURF_TILE_BOTH (3<<16)
+
+
+#define R300_MC_INIT_MISC_LAT_TIMER	0x180
+#	define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT	28
+
+#define R300_MC_INIT_GFX_LAT_TIMER	0x154
+#	define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE                0x1D98
+#define R300_SE_VPORT_XOFFSET               0x1D9C
+#define R300_SE_VPORT_YSCALE                0x1DA0
+#define R300_SE_VPORT_YOFFSET               0x1DA4
+#define R300_SE_VPORT_ZSCALE                0x1DA8
+#define R300_SE_VPORT_ZOFFSET               0x1DAC
+
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code from Christoph Brill (It's a guess!)
+ */
+#define R300_VAP_CNTL	0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL	0x2084
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
+
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
+	/* State based - direct writes to registers trigger vertex
+           generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
+
+	/* I don't think I saw these three used.. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
+
+	/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
+	/* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
+#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END: Wild guesses */
+
+#define R300_SE_VTE_CNTL                  0x20b0
+#	define     R300_VPORT_X_SCALE_ENA                0x00000001
+#	define     R300_VPORT_X_OFFSET_ENA               0x00000002
+#	define     R300_VPORT_Y_SCALE_ENA                0x00000004
+#	define     R300_VPORT_Y_OFFSET_ENA               0x00000008
+#	define     R300_VPORT_Z_SCALE_ENA                0x00000010
+#	define     R300_VPORT_Z_OFFSET_ENA               0x00000020
+#	define     R300_VTX_XY_FMT                       0x00000100
+#	define     R300_VTX_Z_FMT                        0x00000200
+#	define     R300_VTX_W0_FMT                       0x00000400
+#	define     R300_VTX_W0_NORMALIZE                 0x00000800
+#	define     R300_VTX_ST_DENORMALIZED              0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS              0x2140
+#	define R300_VC_NO_SWAP                  (0 << 0)
+#	define R300_VC_16BIT_SWAP               (1 << 0)
+#	define R300_VC_32BIT_SWAP               (2 << 0)
+#	define R300_VAP_TCL_BYPASS		(1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0            0x2150
+#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1            0x2154
+#define R300_VAP_INPUT_ROUTE_0_2            0x2158
+#define R300_VAP_INPUT_ROUTE_0_3            0x215C
+#define R300_VAP_INPUT_ROUTE_0_4            0x2160
+#define R300_VAP_INPUT_ROUTE_0_5            0x2164
+#define R300_VAP_INPUT_ROUTE_0_6            0x2168
+#define R300_VAP_INPUT_ROUTE_0_7            0x216C
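+
+/* Example (sketch, not from the original header): a route word for a
+ * 4-component float attribute sent to vertex input register 0, terminating
+ * the list. Several of the bits used here are marked GUESS above, so treat
+ * this purely as an illustration of how the fields combine:
+ *
+ *   route = R300_INPUT_ROUTE_COMPONENTS_4
+ *         | (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT)
+ *         | R300_INPUT_ROUTE_FLOAT
+ *         | R300_VAP_INPUT_ROUTE_END;
+ */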
+
+/* gap */
+
+/* Notes:
+ *  - always set up to produce at least two attributes:
+ *    if the vertex program uses only position, fglrx will set the normal, too
+ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_INPUT_CNTL_0               0x2180
+#       define R300_INPUT_CNTL_0_COLOR           0x00000001
+#define R300_VAP_INPUT_CNTL_1               0x2184
+#       define R300_INPUT_CNTL_POS               0x00000001
+#       define R300_INPUT_CNTL_NORMAL            0x00000002
+#       define R300_INPUT_CNTL_COLOR             0x00000004
+#       define R300_INPUT_CNTL_TC0               0x00000400
+#       define R300_INPUT_CNTL_TC1               0x00000800
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; all words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, the other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
+#       define R300_INPUT_ROUTE_SELECT_X    0
+#       define R300_INPUT_ROUTE_SELECT_Y    1
+#       define R300_INPUT_ROUTE_SELECT_Z    2
+#       define R300_INPUT_ROUTE_SELECT_W    3
+#       define R300_INPUT_ROUTE_SELECT_ZERO 4
+#       define R300_INPUT_ROUTE_SELECT_ONE  5
+#       define R300_INPUT_ROUTE_SELECT_MASK 7
+#       define R300_INPUT_ROUTE_X_SHIFT     0
+#       define R300_INPUT_ROUTE_Y_SHIFT     3
+#       define R300_INPUT_ROUTE_Z_SHIFT     6
+#       define R300_INPUT_ROUTE_W_SHIFT     9
+#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
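+
+/* Example (sketch): the plain xyzw pattern mentioned above, composed from
+ * the select/shift constants:
+ *
+ *   swz = (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT)
+ *       | (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT)
+ *       | (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT)
+ *       | (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT)
+ *       | R300_INPUT_ROUTE_ENABLE;
+ */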
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * the difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
+#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
+#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
+#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
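+
+/* Example upload sequence (sketch; WRITE_REG is a hypothetical MMIO write
+ * helper, not part of this header):
+ *
+ *   WRITE_REG(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM);
+ *   for (i = 0; i < ninsn * 4; i++)   -- 4 DWORDs per instruction
+ *           WRITE_REG(R300_VAP_PVS_UPLOAD_DATA, insn[i]);
+ */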
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C               0x221C
+#       define R300_221C_NORMAL                  0x00000000
+#       define R300_221C_CLEAR                   0x0001C000
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0                   0x2220
+#define R300_VAP_CLIP_X_1                   0x2224
+#define R300_VAP_CLIP_Y_0                   0x2228
+#define R300_VAP_CLIP_Y_1                   0x2230
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288               0x2288
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
+ * position takes place.
+ *
+ * Most likely this is used to ignore the rest of the program in cases
+ * where a group of vertices isn't visible. For some reason this "section"
+ * sometimes accepts other instructions that have no relationship with
+ * position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1                 0x22D0
+#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
+#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
+#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2                 0x22D4
+#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
+#define R300_VAP_PVS_CNTL_3	           0x22D8
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
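+
+/* Example (sketch): for an n-instruction program starting at address 0 whose
+ * last position write happens in instruction p, a plausible CNTL_1 value is
+ *
+ *   cntl1 = (0       << R300_PVS_CNTL_1_PROGRAM_START_SHIFT)
+ *         | (p       << R300_PVS_CNTL_1_POS_END_SHIFT)
+ *         | ((n - 1) << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
+ */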
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices.
+ */
+#define R300_VAP_VTX_COLOR_R                0x2464
+#define R300_VAP_VTX_COLOR_G                0x2468
+#define R300_VAP_VTX_COLOR_B                0x246C
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1              0x2494
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2              0x24A4
+#define R300_VAP_VTX_POS_0_Z_2              0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT             0x24AC
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT	(1<<2)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT	(1<<3)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT	(1<<4)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE	(0xf<<5)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT	(0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1	0x4004
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT	0
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT	3
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT	6
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT	9
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT	12
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT	15
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT	18
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
+
+/* UNK30 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal for the time being due to shortcomings on
+ * the pixel shader side.
+ */
+#define R300_GB_ENABLE	0x4008
+#	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
+#	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
+#	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
+#	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
+#	define R300_GB_UNK31			(1<<31)
+	/* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE	0
+#define R300_GB_TEX_ST		1
+#define R300_GB_TEX_STR		2
+#	define R300_GB_TEX0_SOURCE_SHIFT	16
+#	define R300_GB_TEX1_SOURCE_SHIFT	18
+#	define R300_GB_TEX2_SOURCE_SHIFT	20
+#	define R300_GB_TEX3_SOURCE_SHIFT	22
+#	define R300_GB_TEX4_SOURCE_SHIFT	24
+#	define R300_GB_TEX5_SOURCE_SHIFT	26
+#	define R300_GB_TEX6_SOURCE_SHIFT	28
+#	define R300_GB_TEX7_SOURCE_SHIFT	30
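+
+/* Example (sketch): feed texture coordinate set 0 as STR and set 1 as ST,
+ * using the 2-bit source constants above:
+ *
+ *   gb_enable |= (R300_GB_TEX_STR << R300_GB_TEX0_SOURCE_SHIFT)
+ *              | (R300_GB_TEX_ST  << R300_GB_TEX1_SOURCE_SHIFT);
+ */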
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0	0x4010
+	/* shifts - each of the fields is 4 bits */
+#	define R300_GB_MSPOS0__MS_X0_SHIFT	0
+#	define R300_GB_MSPOS0__MS_Y0_SHIFT	4
+#	define R300_GB_MSPOS0__MS_X1_SHIFT	8
+#	define R300_GB_MSPOS0__MS_Y1_SHIFT	12
+#	define R300_GB_MSPOS0__MS_X2_SHIFT	16
+#	define R300_GB_MSPOS0__MS_Y2_SHIFT	20
+#	define R300_GB_MSPOS0__MSBD0_Y		24
+#	define R300_GB_MSPOS0__MSBD0_X		28
+
+#define R300_GB_MSPOS1	0x4014
+#	define R300_GB_MSPOS1__MS_X3_SHIFT	0
+#	define R300_GB_MSPOS1__MS_Y3_SHIFT	4
+#	define R300_GB_MSPOS1__MS_X4_SHIFT	8
+#	define R300_GB_MSPOS1__MS_Y4_SHIFT	12
+#	define R300_GB_MSPOS1__MS_X5_SHIFT	16
+#	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
+#	define R300_GB_MSPOS1__MSBD1		24
+
+
+#define R300_GB_TILE_CONFIG	0x4018
+#	define R300_GB_TILE_ENABLE	(1<<0)
+#	define R300_GB_TILE_PIPE_COUNT_RV300	0
+#	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
+#	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_PIPE_COUNT_RV410	(3<<1)
+#	define R300_GB_TILE_SIZE_8		0
+#	define R300_GB_TILE_SIZE_16		(1<<4)
+#	define R300_GB_TILE_SIZE_32		(2<<4)
+#	define R300_GB_SUPER_SIZE_1		(0<<6)
+#	define R300_GB_SUPER_SIZE_2		(1<<6)
+#	define R300_GB_SUPER_SIZE_4		(2<<6)
+#	define R300_GB_SUPER_SIZE_8		(3<<6)
+#	define R300_GB_SUPER_SIZE_16		(4<<6)
+#	define R300_GB_SUPER_SIZE_32		(5<<6)
+#	define R300_GB_SUPER_SIZE_64		(6<<6)
+#	define R300_GB_SUPER_SIZE_128		(7<<6)
+#	define R300_GB_SUPER_X_SHIFT		9	/* 3 bits wide */
+#	define R300_GB_SUPER_Y_SHIFT		12	/* 3 bits wide */
+#	define R300_GB_SUPER_TILE_A		0
+#	define R300_GB_SUPER_TILE_B		(1<<15)
+#	define R300_GB_SUBPIXEL_1_12		0
+#	define R300_GB_SUBPIXEL_1_16		(1<<16)
+
+#define R300_GB_FIFO_SIZE	0x4024
+	/* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32	0
+#define R300_GB_FIFO_SIZE_64	1
+#define R300_GB_FIFO_SIZE_128	2
+#define R300_GB_FIFO_SIZE_256	3
+#	define R300_SC_IFIFO_SIZE_SHIFT	0
+#	define R300_SC_TZFIFO_SIZE_SHIFT	2
+#	define R300_SC_BFIFO_SIZE_SHIFT	4
+
+#	define R300_US_OFIFO_SIZE_SHIFT	12
+#	define R300_US_WFIFO_SIZE_SHIFT	14
+	/* the following use the same constants as above, but the meaning
+	   is times 2 (i.e. instead of 32 words it means 64) */
+#	define R300_RS_TFIFO_SIZE_SHIFT	6
+#	define R300_RS_CFIFO_SIZE_SHIFT	8
+#	define R300_US_RAM_SIZE_SHIFT		10
+	/* watermarks, 3 bits wide */
+#	define R300_RS_HIGHWATER_COL_SHIFT	16
+#	define R300_RS_HIGHWATER_TEX_SHIFT	19
+#	define R300_OFIFO_HIGHWATER_SHIFT	22	/* two bits only */
+#	define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT	24
+
+#define R300_GB_SELECT	0x401C
+#	define R300_GB_FOG_SELECT_C0A		0
+#	define R300_GB_FOG_SELECT_C1A		1
+#	define R300_GB_FOG_SELECT_C2A		2
+#	define R300_GB_FOG_SELECT_C3A		3
+#	define R300_GB_FOG_SELECT_1_1_W	4
+#	define R300_GB_FOG_SELECT_Z		5
+#	define R300_GB_DEPTH_SELECT_Z		0
+#	define R300_GB_DEPTH_SELECT_1_1_W	(1<<3)
+#	define R300_GB_W_SELECT_1_W		0
+#	define R300_GB_W_SELECT_1		(1<<4)
+
+#define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_DISABLE			0x00
+#	define R300_AA_ENABLE			0x01
+#	define R300_AA_SUBSAMPLES_2		0
+#	define R300_AA_SUBSAMPLES_3		(1<<1)
+#	define R300_AA_SUBSAMPLES_4		(2<<1)
+#	define R300_AA_SUBSAMPLES_6		(3<<1)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_INVALTAGS                   0x4100
+#define R300_TX_FLUSH                       0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE                      0x4104
+#       define R300_TX_ENABLE_0                  (1 << 0)
+#       define R300_TX_ENABLE_1                  (1 << 1)
+#       define R300_TX_ENABLE_2                  (1 << 2)
+#       define R300_TX_ENABLE_3                  (1 << 3)
+#       define R300_TX_ENABLE_4                  (1 << 4)
+#       define R300_TX_ENABLE_5                  (1 << 5)
+#       define R300_TX_ENABLE_6                  (1 << 6)
+#       define R300_TX_ENABLE_7                  (1 << 7)
+#       define R300_TX_ENABLE_8                  (1 << 8)
+#       define R300_TX_ENABLE_9                  (1 << 9)
+#       define R300_TX_ENABLE_10                 (1 << 10)
+#       define R300_TX_ENABLE_11                 (1 << 11)
+#       define R300_TX_ENABLE_12                 (1 << 12)
+#       define R300_TX_ENABLE_13                 (1 << 13)
+#       define R300_TX_ENABLE_14                 (1 << 14)
+#       define R300_TX_ENABLE_15                 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE                   0x421C
+#       define R300_POINTSIZE_Y_SHIFT            0
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_POINTSIZE_X_SHIFT            16
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
+#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
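+
+/* Example (sketch): a 10-pixel point, using the multiples-of-6 encoding
+ * described above:
+ *
+ *   pointsize = ((10 * 6) << R300_POINTSIZE_X_SHIFT)
+ *             | ((10 * 6) << R300_POINTSIZE_Y_SHIFT);
+ */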
+
+/* The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT                      0x4234
+#       define R300_LINESIZE_SHIFT            0
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
+#       define R300_LINE_CNT_HO               (1 << 16)
+#       define R300_LINE_CNT_VE               (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238                       0x4238
+
+/* Something shade related */
+#define R300_RE_SHADE                         0x4274
+
+#define R300_RE_SHADE_MODEL                   0x4278
+#	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
+#	define R300_RE_SHADE_MODEL_FLAT       0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE                  0x4288
+#	define R300_PM_ENABLED                (1 << 0)
+#	define R300_PM_FRONT_POINT            (0 << 0)
+#	define R300_PM_BACK_POINT             (0 << 0)
+#	define R300_PM_FRONT_LINE             (1 << 4)
+#	define R300_PM_FRONT_FILL             (1 << 5)
+#	define R300_PM_BACK_LINE              (1 << 7)
+#	define R300_PM_BACK_FILL              (1 << 8)
+
+/* Fog parameters */
+#define R300_RE_FOG_SCALE                     0x4294
+#define R300_RE_FOG_START                     0x4298
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
+#define R300_RE_ZBIAS_T_FACTOR                0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
+#define R300_RE_ZBIAS_W_FACTOR                0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform the depth test (see --vb-triangles in r300_demo).
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT): one to enable the depth test and one for the
+ * depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL		    0x42B4
+#	define R300_OCCLUSION_ON		(1<<1)
+
+#define R300_RE_CULL_CNTL                   0x42B8
+#       define R300_CULL_FRONT                   (1 << 0)
+#       define R300_CULL_BACK                    (1 << 1)
+#       define R300_FRONT_FACE_CCW               (0 << 2)
+#       define R300_FRONT_FACE_CW                (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_CNTL_0                      0x4300
+#       define R300_RS_CNTL_TC_CNT_SHIFT         2
+#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
+	/* number of color interpolators used */
+#	define R300_RS_CNTL_CI_CNT_SHIFT         7
+#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
+	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+	   register. */
+#define R300_RS_CNTL_1                      0x4304
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0                    0x4310
+#define R300_RS_INTERP_1                    0x4314
+#       define R300_RS_INTERP_1_UNKNOWN          0x40
+#define R300_RS_INTERP_2                    0x4318
+#       define R300_RS_INTERP_2_UNKNOWN          0x80
+#define R300_RS_INTERP_3                    0x431C
+#       define R300_RS_INTERP_3_UNKNOWN          0xC0
+#define R300_RS_INTERP_4                    0x4320
+#define R300_RS_INTERP_5                    0x4324
+#define R300_RS_INTERP_6                    0x4328
+#define R300_RS_INTERP_7                    0x432C
+#       define R300_RS_INTERP_SRC_SHIFT          2
+#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
+#       define R300_RS_INTERP_USED               0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0                     0x4330
+#define R300_RS_ROUTE_1                     0x4334
+#define R300_RS_ROUTE_2                     0x4338
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
+#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
+#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
+#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
+#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
+#       define R300_RS_ROUTE_DEST_SHIFT          6
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
+#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
+#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
+#		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
+#		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ                   0x43a4
+#	define R300_SC_HYPERZ_DISABLE     (0 << 0)
+#	define R300_SC_HYPERZ_ENABLE      (1 << 0)
+#	define R300_SC_HYPERZ_MIN         (0 << 1)
+#	define R300_SC_HYPERZ_MAX         (1 << 1)
+#	define R300_SC_HYPERZ_ADJ_256     (0 << 2)
+#	define R300_SC_HYPERZ_ADJ_128     (1 << 2)
+#	define R300_SC_HYPERZ_ADJ_64      (2 << 2)
+#	define R300_SC_HYPERZ_ADJ_32      (3 << 2)
+#	define R300_SC_HYPERZ_ADJ_16      (4 << 2)
+#	define R300_SC_HYPERZ_ADJ_8       (5 << 2)
+#	define R300_SC_HYPERZ_ADJ_4       (6 << 2)
+#	define R300_SC_HYPERZ_ADJ_2       (7 << 2)
+#	define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+#	define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
+
+#define R300_SC_EDGERULE                 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0               0x43B0
+#define R300_RE_CLIPRECT_BR_0               0x43B4
+#define R300_RE_CLIPRECT_TL_1               0x43B8
+#define R300_RE_CLIPRECT_BR_1               0x43BC
+#define R300_RE_CLIPRECT_TL_2               0x43C0
+#define R300_RE_CLIPRECT_BR_2               0x43C4
+#define R300_RE_CLIPRECT_TL_3               0x43C8
+#define R300_RE_CLIPRECT_BR_3               0x43CC
+#       define R300_CLIPRECT_OFFSET              1440
+#       define R300_CLIPRECT_MASK                0x1FFF
+#       define R300_CLIPRECT_X_SHIFT             0
+#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
+#       define R300_CLIPRECT_Y_SHIFT             13
+#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL               0x43D0
+#       define R300_CLIP_OUT                     (1 << 0)
+#       define R300_CLIP_0                       (1 << 1)
+#       define R300_CLIP_1                       (1 << 2)
+#       define R300_CLIP_10                      (1 << 3)
+#       define R300_CLIP_2                       (1 << 4)
+#       define R300_CLIP_20                      (1 << 5)
+#       define R300_CLIP_21                      (1 << 6)
+#       define R300_CLIP_210                     (1 << 7)
+#       define R300_CLIP_3                       (1 << 8)
+#       define R300_CLIP_30                      (1 << 9)
+#       define R300_CLIP_31                      (1 << 10)
+#       define R300_CLIP_310                     (1 << 11)
+#       define R300_CLIP_32                      (1 << 12)
+#       define R300_CLIP_320                     (1 << 13)
+#       define R300_CLIP_321                     (1 << 14)
+#       define R300_CLIP_3210                    (1 << 15)
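+
+/* Example (sketch): a cliprect top-left corner at framebuffer (0, 0), using
+ * the 1440 offset described above:
+ *
+ *   tl = ((0 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_X_SHIFT)
+ *      | ((0 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_Y_SHIFT);
+ */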
+
+/* gap */
+
+#define R300_RE_SCISSORS_TL                 0x43E0
+#define R300_RE_SCISSORS_BR                 0x43E4
+#       define R300_SCISSORS_OFFSET              1440
+#       define R300_SCISSORS_X_SHIFT             0
+#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
+#       define R300_SCISSORS_Y_SHIFT             13
+#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N).
+ */
+#define R300_TX_FILTER_0                    0x4400
+#       define R300_TX_REPEAT                    0
+#       define R300_TX_MIRRORED                  1
+#       define R300_TX_CLAMP                     4
+#       define R300_TX_CLAMP_TO_EDGE             2
+#       define R300_TX_CLAMP_TO_BORDER           6
+#       define R300_TX_WRAP_S_SHIFT              0
+#       define R300_TX_WRAP_S_MASK               (7 << 0)
+#       define R300_TX_WRAP_T_SHIFT              3
+#       define R300_TX_WRAP_T_MASK               (7 << 3)
+#       define R300_TX_WRAP_Q_SHIFT              6
+#       define R300_TX_WRAP_Q_MASK               (7 << 6)
+#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
+#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
+#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
+#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
+#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
+ * anisotropy modes because that would void the selected mag filter.
+ */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
+#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
+#	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
+#	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
+#	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
+#	define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
+#	define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#	define R300_TX_MAX_ANISO_MASK    (14 << 21)
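+
+/* Example (sketch): a repeat-wrapped trilinear filter word composed from the
+ * fields above:
+ *
+ *   filter = (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT)
+ *          | (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT)
+ *          | R300_TX_MAG_FILTER_LINEAR
+ *          | R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR;
+ */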
+
+#define R300_TX_FILTER1_0                      0x4440
+#	define R300_CHROMA_KEY_MODE_DISABLE    0
+#	define R300_CHROMA_KEY_FORCE	       1
+#	define R300_CHROMA_KEY_BLEND           2
+#	define R300_MC_ROUND_NORMAL            (0<<2)
+#	define R300_MC_ROUND_MPEG4             (1<<2)
+#	define R300_LOD_BIAS_MASK	    0x1fff
+#	define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
+#	define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
+#	define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
+#	define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
+#	define R300_TX_TRI_PERF_0_8            (0<<15)
+#	define R300_TX_TRI_PERF_1_8            (1<<15)
+#	define R300_TX_TRI_PERF_1_4            (2<<15)
+#	define R300_TX_TRI_PERF_3_8            (3<<15)
+#	define R300_ANISO_THRESHOLD_MASK       (7<<17)
+
+#define R300_TX_SIZE_0                      0x4480
+#       define R300_TX_WIDTHMASK_SHIFT           0
+#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
+#       define R300_TX_HEIGHTMASK_SHIFT          11
+#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
+#       define R300_TX_UNK23                     (1 << 23)
+#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
+#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
+#       define R300_TX_SIZE_PROJECTED            (1<<30)
+#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
+#define R300_TX_FORMAT_0                    0x44C0
+	/* The interpretation of the format word by Wladimir van der Laan */
+	/* The X, Y, Z and W refer to the layout of the components.
+	   They are given meanings as R, G, B and Alpha by the swizzle
+	   specification */
+#	define R300_TX_FORMAT_X8		    0x0
+#	define R300_TX_FORMAT_X16		    0x1
+#	define R300_TX_FORMAT_Y4X4		    0x2
+#	define R300_TX_FORMAT_Y8X8		    0x3
+#	define R300_TX_FORMAT_Y16X16		    0x4
+#	define R300_TX_FORMAT_Z3Y3X2		    0x5
+#	define R300_TX_FORMAT_Z5Y6X5		    0x6
+#	define R300_TX_FORMAT_Z6Y5X5		    0x7
+#	define R300_TX_FORMAT_Z11Y11X10		    0x8
+#	define R300_TX_FORMAT_Z10Y11X11		    0x9
+#	define R300_TX_FORMAT_W4Z4Y4X4		    0xA
+#	define R300_TX_FORMAT_W1Z5Y5X5		    0xB
+#	define R300_TX_FORMAT_W8Z8Y8X8		    0xC
+#	define R300_TX_FORMAT_W2Z10Y10X10	    0xD
+#	define R300_TX_FORMAT_W16Z16Y16X16	    0xE
+#	define R300_TX_FORMAT_DXT1		    0xF
+#	define R300_TX_FORMAT_DXT3		    0x10
+#	define R300_TX_FORMAT_DXT5		    0x11
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8		    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8		    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8		    0x15     /* no swizzle */
+	/* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
+#	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26)
+
+	/* gap */
+	/* Floating point formats */
+	/* Note - hardware supports both 16 and 32 bit floating point */
+#	define R300_TX_FORMAT_FL_I16		    0x18
+#	define R300_TX_FORMAT_FL_I16A16		    0x19
+#	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A
+#	define R300_TX_FORMAT_FL_I32		    0x1B
+#	define R300_TX_FORMAT_FL_I32A32		    0x1C
+#	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
+#	define R300_TX_FORMAT_ATI2N		    0x1F
+	/* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+#	define R300_TX_FORMAT_ALPHA_1CH		    0x000
+#	define R300_TX_FORMAT_ALPHA_2CH		    0x200
+#	define R300_TX_FORMAT_ALPHA_4CH		    0x600
+#	define R300_TX_FORMAT_ALPHA_NONE	    0xA00
+	/* Swizzling */
+	/* constants */
+#	define R300_TX_FORMAT_X		0
+#	define R300_TX_FORMAT_Y		1
+#	define R300_TX_FORMAT_Z		2
+#	define R300_TX_FORMAT_W		3
+#	define R300_TX_FORMAT_ZERO	4
+#	define R300_TX_FORMAT_ONE	5
+	/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_Z	6
+	/* 2.0*W, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7
+
+#	define R300_TX_FORMAT_B_SHIFT	18
+#	define R300_TX_FORMAT_G_SHIFT	15
+#	define R300_TX_FORMAT_R_SHIFT	12
+#	define R300_TX_FORMAT_A_SHIFT	9
+	/* Convenience macro to take care of layout and swizzling */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
+		((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)		\
+		| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
+		| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
+		| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
+		| (R300_TX_FORMAT_##FMT)				\
+		)
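+	/* Example (sketch): with the layout and swizzle semantics above, a
+	   straight mapping of the W8Z8Y8X8 layout (A=W, R=Z, G=Y, B=X)
+	   would be R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8). */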
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do. Take values from a
+	   constant color? */
+#	define R300_TX_FORMAT_CONST_X		(1<<5)
+#	define R300_TX_FORMAT_CONST_Y		(2<<5)
+#	define R300_TX_FORMAT_CONST_Z		(4<<5)
+#	define R300_TX_FORMAT_CONST_W		(8<<5)
+
+#	define R300_TX_FORMAT_YUV_MODE		0x00800000
+
+#define R300_TX_PITCH_0			    0x4500 /* obvious omission in the gap */
+#define R300_TX_OFFSET_0                    0x4540
+	/* BEGIN: Guess from R200 */
+#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
+#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
+#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
+#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
+#       define R300_TXO_MACRO_TILE               (1 << 2)
+#       define R300_TXO_MICRO_TILE               (1 << 3)
+#       define R300_TXO_MICRO_TILE_SQUARE        (2 << 3)
+#       define R300_TXO_OFFSET_MASK              0xffffffe0
+#       define R300_TXO_OFFSET_SHIFT             5
+	/* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0                      0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0              0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have at
+ * least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0                     0x4600
+#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
+#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
+#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
+#define R300_PFS_CNTL_1                     0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2                     0x4608
+#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_SHIFT       6
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
+#       define R300_PFS_CNTL_TEX_END_SHIFT       18
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
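+
+/* Example (sketch): a program with 4 ALU ops and 1 TEX op, both streams
+ * starting at offset 0 (the *_END fields hold the last instruction index):
+ *
+ *   cntl2 = (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT)
+ *         | (3 << R300_PFS_CNTL_ALU_END_SHIFT)
+ *         | (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT)
+ *         | (0 << R300_PFS_CNTL_TEX_END_SHIFT);
+ */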
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_PFS_NODE_0                     0x4610
+#define R300_PFS_NODE_1                     0x4614
+#define R300_PFS_NODE_2                     0x4618
+#define R300_PFS_NODE_3                     0x461C
+#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_NODE_ALU_END_SHIFT       6
+#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
+#       define R300_PFS_NODE_TEX_END_SHIFT       17
+#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
+#		define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
+#		define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
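+
+/* Example (sketch; WRITE_REG is a hypothetical MMIO write helper, and
+ * first_node_bits/last_node_bits are placeholders): a two-node program laid
+ * out backwards as described above:
+ *
+ *   WRITE_REG(R300_PFS_NODE_0, 0);
+ *   WRITE_REG(R300_PFS_NODE_1, 0);
+ *   WRITE_REG(R300_PFS_NODE_2, first_node_bits);
+ *   WRITE_REG(R300_PFS_NODE_3, last_node_bits | R300_PFS_NODE_OUTPUT_COLOR);
+ */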
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_PFS_TEXI_0                     0x4620
+#	define R300_FPITX_SRC_SHIFT              0
+#	define R300_FPITX_SRC_MASK               (31 << 0)
+	/* GUESS */
+#	define R300_FPITX_SRC_CONST              (1 << 5)
+#	define R300_FPITX_DST_SHIFT              6
+#	define R300_FPITX_DST_MASK               (31 << 6)
+#	define R300_FPITX_IMAGE_SHIFT            11
+	/* GUESS based on layout and native limits */
+#       define R300_FPITX_IMAGE_MASK             (15 << 11)
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#	define R300_FPITX_OPCODE_SHIFT		15
+#		define R300_FPITX_OP_TEX	1
+#		define R300_FPITX_OP_KIL	2
+#		define R300_FPITX_OP_TXP	3
+#		define R300_FPITX_OP_TXB	4
+#	define R300_FPITX_OPCODE_MASK           (7 << 15)
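+
+/* Example (sketch): a plain TEX from temporary 0 into temporary 0, sampling
+ * texture image 0, composed from the fields above:
+ *
+ *   texi = (0 << R300_FPITX_SRC_SHIFT)
+ *        | (0 << R300_FPITX_DST_SHIFT)
+ *        | (0 << R300_FPITX_IMAGE_SHIFT)
+ *        | (R300_FPITX_OP_TEX << R300_FPITX_OPCODE_SHIFT);
+ */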
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the order
+ * in which fglrx emits them. I assume there is space for 64 instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ *  - DP4: Use OUTC_DP4, OUTA_DP4
+ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ *  - FLR: use FRC+MAD
+ *  - XPD: use MAD+MAD
+ *  - SGE, SLT: use MAD+CMP
+ *  - RSQ: use ABS modifier for argument
+ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *    (e.g. RCP) into color register
+ *  - apparently, there's no quick DST operation
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their source
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ *  - Argument order is the same as in ARB_fragment_program.
+ *  - Operation is MAD
+ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ *  - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_PFS_INSTR1_0                   0x46C0
+#       define R300_FPI1_SRC0C_SHIFT             0
+#       define R300_FPI1_SRC0C_MASK              (31 << 0)
+#       define R300_FPI1_SRC0C_CONST             (1 << 5)
+#       define R300_FPI1_SRC1C_SHIFT             6
+#       define R300_FPI1_SRC1C_MASK              (31 << 6)
+#       define R300_FPI1_SRC1C_CONST             (1 << 11)
+#       define R300_FPI1_SRC2C_SHIFT             12
+#       define R300_FPI1_SRC2C_MASK              (31 << 12)
+#       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_SRC_MASK                0x0003ffff
+#       define R300_FPI1_DSTC_SHIFT              18
+#       define R300_FPI1_DSTC_MASK               (31 << 18)
+#		define R300_FPI1_DSTC_REG_MASK_SHIFT     23
+#       define R300_FPI1_DSTC_REG_X              (1 << 23)
+#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
+#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
+#		define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
+#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
+#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
+#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
+
+#define R300_PFS_INSTR3_0                   0x47C0
+#       define R300_FPI3_SRC0A_SHIFT             0
+#       define R300_FPI3_SRC0A_MASK              (31 << 0)
+#       define R300_FPI3_SRC0A_CONST             (1 << 5)
+#       define R300_FPI3_SRC1A_SHIFT             6
+#       define R300_FPI3_SRC1A_MASK              (31 << 6)
+#       define R300_FPI3_SRC1A_CONST             (1 << 11)
+#       define R300_FPI3_SRC2A_SHIFT             12
+#       define R300_FPI3_SRC2A_MASK              (31 << 12)
+#       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_SRC_MASK                0x0003ffff
+#       define R300_FPI3_DSTA_SHIFT              18
+#       define R300_FPI3_DSTA_MASK               (31 << 18)
+#       define R300_FPI3_DSTA_REG                (1 << 23)
+#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
+#		define R300_FPI3_DSTA_DEPTH              (1 << 27)
+
+#define R300_PFS_INSTR0_0                   0x48C0
+#       define R300_FPI0_ARGC_SRC0C_XYZ          0
+#       define R300_FPI0_ARGC_SRC0C_XXX          1
+#       define R300_FPI0_ARGC_SRC0C_YYY          2
+#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
+#       define R300_FPI0_ARGC_SRC1C_XYZ          4
+#       define R300_FPI0_ARGC_SRC1C_XXX          5
+#       define R300_FPI0_ARGC_SRC1C_YYY          6
+#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
+#       define R300_FPI0_ARGC_SRC2C_XYZ          8
+#       define R300_FPI0_ARGC_SRC2C_XXX          9
+#       define R300_FPI0_ARGC_SRC2C_YYY          10
+#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
+#       define R300_FPI0_ARGC_SRC0A              12
+#       define R300_FPI0_ARGC_SRC1A              13
+#       define R300_FPI0_ARGC_SRC2A              14
+#       define R300_FPI0_ARGC_SRC1C_LRP          15
+#       define R300_FPI0_ARGC_ZERO               20
+#       define R300_FPI0_ARGC_ONE                21
+	/* GUESS */
+#       define R300_FPI0_ARGC_HALF               22
+#       define R300_FPI0_ARGC_SRC0C_YZX          23
+#       define R300_FPI0_ARGC_SRC1C_YZX          24
+#       define R300_FPI0_ARGC_SRC2C_YZX          25
+#       define R300_FPI0_ARGC_SRC0C_ZXY          26
+#       define R300_FPI0_ARGC_SRC1C_ZXY          27
+#       define R300_FPI0_ARGC_SRC2C_ZXY          28
+#       define R300_FPI0_ARGC_SRC0CA_WZY         29
+#       define R300_FPI0_ARGC_SRC1CA_WZY         30
+#       define R300_FPI0_ARGC_SRC2CA_WZY         31
+
+#       define R300_FPI0_ARG0C_SHIFT             0
+#       define R300_FPI0_ARG0C_MASK              (31 << 0)
+#       define R300_FPI0_ARG0C_NEG               (1 << 5)
+#       define R300_FPI0_ARG0C_ABS               (1 << 6)
+#       define R300_FPI0_ARG1C_SHIFT             7
+#       define R300_FPI0_ARG1C_MASK              (31 << 7)
+#       define R300_FPI0_ARG1C_NEG               (1 << 12)
+#       define R300_FPI0_ARG1C_ABS               (1 << 13)
+#       define R300_FPI0_ARG2C_SHIFT             14
+#       define R300_FPI0_ARG2C_MASK              (31 << 14)
+#       define R300_FPI0_ARG2C_NEG               (1 << 19)
+#       define R300_FPI0_ARG2C_ABS               (1 << 20)
+#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI0_OUTC_MAD                (0 << 23)
+#       define R300_FPI0_OUTC_DP3                (1 << 23)
+#       define R300_FPI0_OUTC_DP4                (2 << 23)
+#       define R300_FPI0_OUTC_MIN                (4 << 23)
+#       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMPH               (7 << 23)
+#       define R300_FPI0_OUTC_CMP                (8 << 23)
+#       define R300_FPI0_OUTC_FRC                (9 << 23)
+#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
+#       define R300_FPI0_OUTC_SAT                (1 << 30)
+#       define R300_FPI0_INSERT_NOP              (1U << 31)
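+
+/* Example (sketch): the color half of the pass-through "MAD o0, r0, 1, 0"
+ * mentioned in the TEX comment, using INSTR0/INSTR1 (the alpha half would
+ * be encoded analogously via INSTR2/INSTR3):
+ *
+ *   instr0 = (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT)
+ *          | (R300_FPI0_ARGC_ONE       << R300_FPI0_ARG1C_SHIFT)
+ *          | (R300_FPI0_ARGC_ZERO      << R300_FPI0_ARG2C_SHIFT)
+ *          | R300_FPI0_OUTC_MAD;
+ *   instr1 = (0 << R300_FPI1_SRC0C_SHIFT)
+ *          | (0 << R300_FPI1_DSTC_SHIFT)
+ *          | R300_FPI1_DSTC_OUTPUT_X | R300_FPI1_DSTC_OUTPUT_Y
+ *          | R300_FPI1_DSTC_OUTPUT_Z;
+ */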
+
+#define R300_PFS_INSTR2_0                   0x49C0
+#       define R300_FPI2_ARGA_SRC0C_X            0
+#       define R300_FPI2_ARGA_SRC0C_Y            1
+#       define R300_FPI2_ARGA_SRC0C_Z            2
+#       define R300_FPI2_ARGA_SRC1C_X            3
+#       define R300_FPI2_ARGA_SRC1C_Y            4
+#       define R300_FPI2_ARGA_SRC1C_Z            5
+#       define R300_FPI2_ARGA_SRC2C_X            6
+#       define R300_FPI2_ARGA_SRC2C_Y            7
+#       define R300_FPI2_ARGA_SRC2C_Z            8
+#       define R300_FPI2_ARGA_SRC0A              9
+#       define R300_FPI2_ARGA_SRC1A              10
+#       define R300_FPI2_ARGA_SRC2A              11
+#       define R300_FPI2_ARGA_SRC1A_LRP          15
+#       define R300_FPI2_ARGA_ZERO               16
+#       define R300_FPI2_ARGA_ONE                17
+	/* GUESS */
+#       define R300_FPI2_ARGA_HALF               18
+#       define R300_FPI2_ARG0A_SHIFT             0
+#       define R300_FPI2_ARG0A_MASK              (31 << 0)
+#       define R300_FPI2_ARG0A_NEG               (1 << 5)
+	/* GUESS */
+#	define R300_FPI2_ARG0A_ABS		 (1 << 6)
+#       define R300_FPI2_ARG1A_SHIFT             7
+#       define R300_FPI2_ARG1A_MASK              (31 << 7)
+#       define R300_FPI2_ARG1A_NEG               (1 << 12)
+	/* GUESS */
+#	define R300_FPI2_ARG1A_ABS		 (1 << 13)
+#       define R300_FPI2_ARG2A_SHIFT             14
+#       define R300_FPI2_ARG2A_MASK              (31 << 14)
+#       define R300_FPI2_ARG2A_NEG               (1 << 19)
+	/* GUESS */
+#	define R300_FPI2_ARG2A_ABS		 (1 << 20)
+#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI2_OUTA_MAD                (0 << 23)
+#       define R300_FPI2_OUTA_DP4                (1 << 23)
+#       define R300_FPI2_OUTA_MIN                (2 << 23)
+#       define R300_FPI2_OUTA_MAX                (3 << 23)
+#       define R300_FPI2_OUTA_CMP                (6 << 23)
+#       define R300_FPI2_OUTA_FRC                (7 << 23)
+#       define R300_FPI2_OUTA_EX2                (8 << 23)
+#       define R300_FPI2_OUTA_LG2                (9 << 23)
+#       define R300_FPI2_OUTA_RCP                (10 << 23)
+#       define R300_FPI2_OUTA_RSQ                (11 << 23)
+#       define R300_FPI2_OUTA_SAT                (1 << 30)
+#       define R300_FPI2_UNKNOWN_31              (1U << 31)
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE                   0x4BC0
+#       define R300_FOG_ENABLE                   (1 << 0)
+#	define R300_FOG_MODE_LINEAR              (0 << 1)
+#	define R300_FOG_MODE_EXP                 (1 << 1)
+#	define R300_FOG_MODE_EXP2                (2 << 1)
+#	define R300_FOG_MODE_MASK                (3 << 1)
+#define R300_FOG_COLOR_R                    0x4BC8
+#define R300_FOG_COLOR_G                    0x4BCC
+#define R300_FOG_COLOR_B                    0x4BD0
+
+#define R300_PP_ALPHA_TEST                  0x4BD4
+#       define R300_REF_ALPHA_MASK               0x000000ff
+#       define R300_ALPHA_TEST_FAIL              (0 << 8)
+#       define R300_ALPHA_TEST_LESS              (1 << 8)
+#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
+#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
+#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
+#       define R300_ALPHA_TEST_GREATER           (4 << 8)
+#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
+#       define R300_ALPHA_TEST_PASS              (7 << 8)
+#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
+#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X                  0x4C00
+#define R300_PFS_PARAM_0_Y                  0x4C04
+#define R300_PFS_PARAM_0_Z                  0x4C08
+#define R300_PFS_PARAM_0_W                  0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X                 0x4DF0
+#define R300_PFS_PARAM_31_Y                 0x4DF4
+#define R300_PFS_PARAM_31_Z                 0x4DF8
+#define R300_PFS_PARAM_31_W                 0x4DFC
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ *   the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND                    0x4E04
+#define R300_RB3D_ABLEND                    0x4E08
+/* the following only appear in CBLEND */
+#       define R300_BLEND_ENABLE                     (1 << 0)
+#       define R300_BLEND_UNKNOWN                    (3 << 1)
+#       define R300_BLEND_NO_SEPARATE                (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+#       define R300_FCN_MASK                         (3  << 12)
+#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define R300_COMB_FCN_MIN                     (4  << 12)
+#       define R300_COMB_FCN_MAX                     (5  << 12)
+#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
+#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
+#       define R300_BLEND_GL_ZERO                    (32)
+#       define R300_BLEND_GL_ONE                     (33)
+#       define R300_BLEND_GL_SRC_COLOR               (34)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
+#       define R300_BLEND_GL_DST_COLOR               (36)
+#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
+#       define R300_BLEND_GL_SRC_ALPHA               (38)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
+#       define R300_BLEND_GL_DST_ALPHA               (40)
+#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
+#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
+#       define R300_BLEND_GL_CONST_COLOR             (43)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
+#       define R300_BLEND_GL_CONST_ALPHA             (45)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
+#       define R300_BLEND_MASK                       (63)
+#       define R300_SRC_BLEND_SHIFT                  (16)
+#       define R300_DST_BLEND_SHIFT                  (24)
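+
+/* Example (sketch): classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending,
+ * composed from the (largely R200-derived) constants above:
+ *
+ *   cblend = R300_BLEND_ENABLE
+ *          | R300_COMB_FCN_ADD_CLAMP
+ *          | (R300_BLEND_GL_SRC_ALPHA           << R300_SRC_BLEND_SHIFT)
+ *          | (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+ */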
+#define R300_RB3D_BLEND_COLOR               0x4E10
+#define R300_RB3D_COLORMASK                 0x4E0C
+#       define R300_COLORMASK0_B                 (1<<0)
+#       define R300_COLORMASK0_G                 (1<<1)
+#       define R300_COLORMASK0_R                 (1<<2)
+#       define R300_COLORMASK0_A                 (1<<3)
+
+/* gap */
+
+#define R300_RB3D_COLOROFFSET0              0x4E28
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+
+/* gap */
+
+/* Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird, tile-like, but some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0               0x4E38
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
+#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
+
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
+#define R300_RB3D_AARESOLVE_CTL             0x4E88
+/* gap */
+
+/* Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
+#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
+#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so plain write is 6.  - vd
+ */
+#define R300_ZB_CNTL                             0x4F00
+#	define R300_STENCIL_ENABLE		 (1 << 0)
+#	define R300_Z_ENABLE		         (1 << 1)
+#	define R300_Z_WRITE_ENABLE		 (1 << 2)
+#	define R300_Z_SIGNED_COMPARE		 (1 << 3)
+#	define R300_STENCIL_FRONT_BACK		 (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL                   0x4f04
+	/* functions */
+#	define R300_ZS_NEVER			0
+#	define R300_ZS_LESS			1
+#	define R300_ZS_LEQUAL			2
+#	define R300_ZS_EQUAL			3
+#	define R300_ZS_GEQUAL			4
+#	define R300_ZS_GREATER			5
+#	define R300_ZS_NOTEQUAL			6
+#	define R300_ZS_ALWAYS			7
+#       define R300_ZS_MASK                     7
+	/* operations */
+#	define R300_ZS_KEEP			0
+#	define R300_ZS_ZERO			1
+#	define R300_ZS_REPLACE			2
+#	define R300_ZS_INCR			3
+#	define R300_ZS_DECR			4
+#	define R300_ZS_INVERT			5
+#	define R300_ZS_INCR_WRAP		6
+#	define R300_ZS_DECR_WRAP		7
+#	define R300_Z_FUNC_SHIFT		0
+	/* front and back refer to operations done for front
+	   and back faces, i.e. separate stencil function support */
+#	define R300_S_FRONT_FUNC_SHIFT	        3
+#	define R300_S_FRONT_SFAIL_OP_SHIFT	6
+#	define R300_S_FRONT_ZPASS_OP_SHIFT	9
+#	define R300_S_FRONT_ZFAIL_OP_SHIFT      12
+#	define R300_S_BACK_FUNC_SHIFT           15
+#	define R300_S_BACK_SFAIL_OP_SHIFT       18
+#	define R300_S_BACK_ZPASS_OP_SHIFT       21
+#	define R300_S_BACK_ZFAIL_OP_SHIFT       24
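+
+/* Illustration only (placeholder value, not used by the driver): with the
+ * codes and shifts above, a LEQUAL depth test whose front-face stencil
+ * keeps on sfail/zfail and replaces on zpass packs roughly as:
+ *
+ *   (R300_ZS_LEQUAL  << R300_Z_FUNC_SHIFT) |
+ *   (R300_ZS_ALWAYS  << R300_S_FRONT_FUNC_SHIFT) |
+ *   (R300_ZS_KEEP    << R300_S_FRONT_SFAIL_OP_SHIFT) |
+ *   (R300_ZS_REPLACE << R300_S_FRONT_ZPASS_OP_SHIFT) |
+ *   (R300_ZS_KEEP    << R300_S_FRONT_ZFAIL_OP_SHIFT)
+ */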
+
+#define R300_ZB_STENCILREFMASK                        0x4f08
+#	define R300_STENCILREF_SHIFT       0
+#	define R300_STENCILREF_MASK        0x000000ff
+#	define R300_STENCILMASK_SHIFT      8
+#	define R300_STENCILMASK_MASK       0x0000ff00
+#	define R300_STENCILWRITEMASK_SHIFT 16
+#	define R300_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* gap */
+
+#define R300_ZB_FORMAT                             0x4f10
+#	define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
+#	define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
+#	define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
+/* reserved up to (15 << 0) */
+#	define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
+#	define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP                             0x4F14
+#	define R300_ZTOP_DISABLE                 (0 << 0)
+#	define R300_ZTOP_ENABLE                  (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1U << 31)
+
+#define R300_ZB_BW_CNTL                     0x4f1c
+#	define R300_HIZ_DISABLE                              (0 << 0)
+#	define R300_HIZ_ENABLE                               (1 << 0)
+#	define R300_HIZ_MIN                                  (0 << 1)
+#	define R300_HIZ_MAX                                  (1 << 1)
+#	define R300_FAST_FILL_DISABLE                        (0 << 2)
+#	define R300_FAST_FILL_ENABLE                         (1 << 2)
+#	define R300_RD_COMP_DISABLE                          (0 << 3)
+#	define R300_RD_COMP_ENABLE                           (1 << 3)
+#	define R300_WR_COMP_DISABLE                          (0 << 4)
+#	define R300_WR_COMP_ENABLE                           (1 << 4)
+#	define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
+#	define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
+
+#	define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
+#	define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
+#	define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
+#	define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
+
+#	define R500_BMASK_ENABLE                             (0 << 10)
+#	define R500_BMASK_DISABLE                            (1 << 10)
+#	define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
+#	define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
+#	define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
+#	define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
+#	define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
+#	define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
+#	define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
+#	define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
+#	define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
+#	define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
+#	define R500_PEQ_PACKING_DISABLE                      (0 << 18)
+#	define R500_PEQ_PACKING_ENABLE                       (1 << 18)
+#	define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
+#	define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET               0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH                0x4f24
+#       define R300_DEPTHPITCH_MASK              0x00003FFC
+#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
+#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
+#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
+#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
+#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
+#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
+#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
+#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
+
+#define R300_ZB_ZMASK_OFFSET			 0x4f30
+#define R300_ZB_ZMASK_PITCH			 0x4f34
+#define R300_ZB_ZMASK_WRINDEX			 0x4f38
+#define R300_ZB_ZMASK_DWORD			 0x4f3c
+#define R300_ZB_ZMASK_RDINDEX			 0x4f40
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET                       0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX                      0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD                        0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX                      0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH                        0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA                       0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR                       0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
+#	define R300_DEPTHX_OFFSET_SHIFT  1
+#	define R300_DEPTHX_OFFSET_MASK   0x000007FE
+#	define R300_DEPTHY_OFFSET_SHIFT  17
+#	define R300_DEPTHY_OFFSET_MASK   0x07FE0000
+
+/* Sets the fifo sizes */
+#define R500_ZB_FIFO_SIZE                        0x4fd0
+#	define R500_OP_FIFO_SIZE_FULL   (0 << 0)
+#	define R500_OP_FIFO_SIZE_HALF   (1 << 0)
+#	define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+#	define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF                0x4fd4
+#	define R500_STENCILREF_SHIFT       0
+#	define R500_STENCILREF_MASK        0x000000ff
+#	define R500_STENCILMASK_SHIFT      8
+#	define R500_STENCILMASK_MASK       0x0000ff00
+#	define R500_STENCILWRITEMASK_SHIFT 16
+#	define R500_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ *  DWORD 0: output and opcode
+ *  DWORD 1: first argument
+ *  DWORD 2: second argument
+ *  DWORD 3: third argument
+ *
+ * Notes:
+ *  - ABS r, a is implemented as MAX r, a, -a
+ *  - MOV is implemented as ADD to zero
+ *  - XPD is implemented as MUL + MAD
+ *  - FLR is implemented as FRC + ADD
+ *  - apparently, fglrx tries to schedule instructions so that there is at
+ *    least one instruction between the write to a temporary and the first
+ *    read from said temporary; however, violations of this scheduling are
+ *    allowed
+ *  - register indices seem to be unrelated to OpenGL aliasing to
+ *    conventional state
+ *  - only one attribute and one parameter can be loaded at a time; however,
+ *    the same attribute/parameter can be used for more than one argument
+ *  - the second software argument for POW is the third hardware argument
+ *    (no idea why)
+ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ *   The single argument is replicated across all three inputs, but swizzled:
+ *     First argument: xyzy
+ *     Second argument: xyzx
+ *     Third argument: xyzw
+ *   Whenever the result is used later in the fragment program, fglrx forces
+ *   x and w to be 1.0 in the input selection; I don't know whether this is
+ *   strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT                     (1 << 0)
+#define R300_VPI_OUT_OP_MUL                     (2 << 0)
+#define R300_VPI_OUT_OP_ADD                     (3 << 0)
+#define R300_VPI_OUT_OP_MAD                     (4 << 0)
+#define R300_VPI_OUT_OP_DST                     (5 << 0)
+#define R300_VPI_OUT_OP_FRC                     (6 << 0)
+#define R300_VPI_OUT_OP_MAX                     (7 << 0)
+#define R300_VPI_OUT_OP_MIN                     (8 << 0)
+#define R300_VPI_OUT_OP_SGE                     (9 << 0)
+#define R300_VPI_OUT_OP_SLT                     (10 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
+#define R300_VPI_OUT_OP_ARL                     (13 << 0)
+#define R300_VPI_OUT_OP_EXP                     (65 << 0)
+#define R300_VPI_OUT_OP_LOG                     (66 << 0)
+	/* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
+#define R300_VPI_OUT_OP_LIT                     (68 << 0)
+#define R300_VPI_OUT_OP_POW                     (69 << 0)
+#define R300_VPI_OUT_OP_RCP                     (70 << 0)
+#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
+#define R300_VPI_OUT_OP_EX2                     (75 << 0)
+#define R300_VPI_OUT_OP_LG2                     (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
+	/* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT            13
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
+
+#define R300_VPI_OUT_WRITE_X                    (1 << 20)
+#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
+#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
+#define R300_VPI_OUT_WRITE_W                    (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
+
+#define R300_VPI_IN_REG_INDEX_SHIFT             5
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
+
+/* The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted left by the R300_VPI_IN_*_SHIFT
+ * of the component slot you want to fill
+ */
+#define R300_VPI_IN_SELECT_X    0
+#define R300_VPI_IN_SELECT_Y    1
+#define R300_VPI_IN_SELECT_Z    2
+#define R300_VPI_IN_SELECT_W    3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE  5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT                     13
+#define R300_VPI_IN_Y_SHIFT                     16
+#define R300_VPI_IN_Z_SHIFT                     19
+#define R300_VPI_IN_W_SHIFT                     22
+
+#define R300_VPI_IN_NEG_X                       (1 << 25)
+#define R300_VPI_IN_NEG_Y                       (1 << 26)
+#define R300_VPI_IN_NEG_Z                       (1 << 27)
+#define R300_VPI_IN_NEG_W                       (1 << 28)
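+
+/* Illustration only: per the notes above, MOV is implemented as ADD with a
+ * zero operand.  Copying temporary 1 to temporary 0 would be encoded
+ * roughly as below; dwords 2 and 3 would select R300_VPI_IN_SELECT_ZERO
+ * in every component slot.
+ *
+ *   dw0 = R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY |
+ *         (0 << R300_VPI_OUT_REG_INDEX_SHIFT) |
+ *         R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y |
+ *         R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
+ *   dw1 = R300_VPI_IN_REG_CLASS_TEMPORARY |
+ *         (1 << R300_VPI_IN_REG_INDEX_SHIFT) |
+ *         (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
+ *         (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
+ *         (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
+ *         (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
+ */
+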
+/* END: Vertex program instruction set */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE                     (0 << 0)
+#define R300_PRIM_TYPE_POINT                    (1 << 0)
+#define R300_PRIM_TYPE_LINE                     (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
+	/* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
+#define R300_PRIM_TYPE_QUADS                    (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
+#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
+#define R300_PRIM_TYPE_MASK                     0xF
+#define R300_PRIM_WALK_IND                      (1 << 4)
+#define R300_PRIM_WALK_LIST                     (2 << 4)
+#define R300_PRIM_WALK_RING                     (3 << 4)
+#define R300_PRIM_WALK_MASK                     (3 << 4)
+	/* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT            16
+#define R300_PRIM_NUM_VERTICES_MASK             0xffff
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to always be 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
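+/* Illustration only: the emission dword for an indexed triangle list of
+ * `count' vertices (count is a placeholder) would be roughly:
+ *
+ *   R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
+ *       (count << R300_PRIM_NUM_VERTICES_SHIFT)
+ */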
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
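+/* Illustration only: per the layout above, the first dword of a two-array
+ * block packs the (size, stride) byte pairs of both arrays (size0, stride0,
+ * size1 and stride1 are placeholders, all in dwords):
+ *
+ *   dw0 = size0 | (stride0 << 8) | (size1 << 16) | (stride1 << 24);
+ */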
+
+#define R300_PACKET3_INDX_BUFFER            0x00003300
+#    define R300_EB_UNK1_SHIFT                      24
+#    define R300_EB_UNK1                    (0x80<<24)
+#    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
+#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8	2
+#define R300_CP_COLOR_FORMAT_ARGB1555	3
+#define R300_CP_COLOR_FORMAT_RGB565	4
+#define R300_CP_COLOR_FORMAT_ARGB8888	6
+#define R300_CP_COLOR_FORMAT_RGB332	7
+#define R300_CP_COLOR_FORMAT_RGB8	9
+#define R300_CP_COLOR_FORMAT_ARGB4444	15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
+
+#define R500_VAP_INDEX_OFFSET		0x208c
+
+#define R500_GA_US_VECTOR_INDEX         0x4250
+#define R500_GA_US_VECTOR_DATA          0x4254
+
+#define R500_RS_IP_0                    0x4074
+#define R500_RS_INST_0                  0x4320
+
+#define R500_US_CONFIG                  0x4600
+
+#define R500_US_FC_CTRL			0x4624
+#define R500_US_CODE_ADDR		0x4630
+
+#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
+#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
+
+#define R300_SU_REG_DEST                0x42c8
+#define RV530_FG_ZBREG_DEST             0x4be8
+#define R300_ZB_ZPASS_DATA              0x4f58
+#define R300_ZB_ZPASS_ADDR              0x4f5c
+
+#endif /* _R300_REG_H */


Property changes on: trunk/sys/dev/drm2/radeon/r300_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r300_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r300_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r300_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,46 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r300_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
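+/* One bit per dword-aligned register: bit n of word w covers register
+ * offset (w * 32 + n) << 2, so the 159 words span offsets 0x0000-0x4F7C.
+ * A clear bit appears to let user command streams write the register
+ * directly, while a set bit routes the write through the per-register
+ * checker, which relocates or rejects it.
+ */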
+static const unsigned r300_reg_safe_bm[159] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99,
+};


Property changes on: trunk/sys/dev/drm2/radeon/r300_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r300d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r300d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r300d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,358 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R300D_H__
+#define __R300D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r300d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_3D_CLEAR_CMASK		0x38
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
+
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
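+
+/* Illustration only (REG_SET is assumed to come from a companion header):
+ * PACKET0(reg, n) builds a type-0 header that writes n + 1 dwords starting
+ * at register `reg'; CP_PACKET0_GET_REG() recovers the register offset
+ * from such a header and CP_PACKET_GET_COUNT() the dword count.
+ */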
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
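+
+/* Illustration of the S_/G_/C_ convention used throughout this file: S_
+ * shifts a field value into place, G_ extracts it, and C_ is the mask that
+ * clears it.  A read-modify-write of just MC_FB_START would look roughly
+ * like (tmp and start are placeholders):
+ *
+ *   tmp = (tmp & C_000148_MC_FB_START) | S_000148_MC_FB_START(start);
+ */
+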
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r300d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r420.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r420.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r420.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,492 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r420.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r100d.h"
+#include "r420d.h"
+#include "r420_reg_safe.h"
+
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+	unsigned tmp;
+	unsigned gb_pipe_select;
+	unsigned num_pipes;
+
+	/* GA_ENHANCE workaround TCL deadlock issue */
+	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+	       (1 << 2) | (1 << 3));
+	/* add idle wait as per freedesktop.org bug 24041 */
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait for GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	/* get max number of pipes */
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+	/* SE chips have 1 pipe */
+	if ((rdev->ddev->pci_device == 0x5e4c) ||
+	    (rdev->ddev->pci_device == 0x5e4f))
+		num_pipes = 1;
+
+	rdev->num_gb_pipes = num_pipes;
+	tmp = 0;
+	switch (num_pipes) {
+	default:
+		/* force to 1 pipe */
+		num_pipes = 1;
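+		/* fall through */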
+	case 1:
+		tmp = (0 << 1);
+		break;
+	case 2:
+		tmp = (3 << 1);
+		break;
+	case 3:
+		tmp = (6 << 1);
+		break;
+	case 4:
+		tmp = (7 << 1);
+		break;
+	}
+	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
+	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
+	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+	WREG32(R300_GB_TILE_CONFIG, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait for GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       RREG32(R300_RB2D_DSTCACHE_MODE) |
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait for GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->family == CHIP_RV530) {
+		tmp = RREG32(RV530_GB_PIPE_SELECT2);
+		if ((tmp & 3) == 3)
+			rdev->num_z_pipes = 2;
+		else
+			rdev->num_z_pipes = 1;
+	} else
+		rdev->num_z_pipes = 1;
+
+	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	u32 r;
+
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0001FC_MC_IND_DATA);
+	return r;
+}
+
+void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
+		S_0001F8_MC_IND_WR_EN(1));
+	WREG32(R_0001FC_MC_IND_DATA, v);
+}
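+
+/* Illustration only: the MC register file is reached through the
+ * index/data pair above, so a read-modify-write of an MC register
+ * (reg and bits are placeholders) looks like:
+ *
+ *   tmp = r420_mc_rreg(rdev, reg);
+ *   r420_mc_wreg(rdev, reg, tmp | bits);
+ */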
+
+static void r420_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (r420_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+}
+
+static void r420_clock_resume(struct radeon_device *rdev)
+{
+	u32 sclk_cntl;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
+	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if (rdev->family == CHIP_R420)
+		sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+}
+
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* RV410 and R420 can lock up if CP DMA to host memory happens
+	 * while the 2D engine is busy.
+	 *
+	 * The proper workaround is to queue a RESYNC at the beginning
+	 * of the CP init, apparently.
+	 */
+	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+	radeon_ring_lock(rdev, ring, 8);
+	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* Catch the RESYNC we dispatched all the way back,
+	 * at the very beginning of the CP init.
+	 */
+	radeon_ring_lock(rdev, ring, 8);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
+static int r420_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r420_clock_resume(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r420_pipes_init(rdev);
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+	r420_cp_errata_init(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r420_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GARTs are not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r420_clock_resume(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (rdev->is_atom_bios) {
+		atom_asic_init(rdev->mode_info.atom_context);
+	} else {
+		radeon_combios_asic_init(rdev->ddev);
+	}
+	/* Resume clock after posting */
+	r420_clock_resume(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r420_suspend(struct radeon_device *rdev)
+{
+	r420_cp_errata_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r420_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	if (rdev->is_atom_bios) {
+		radeon_atombios_fini(rdev);
+	} else {
+		radeon_combios_fini(rdev);
+	}
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int r420_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disabling VGA needs to go through the VGA request interface */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r) {
+			return r;
+		}
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r) {
+			return r;
+		}
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	r420_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r) {
+		return r;
+	}
+	r = radeon_irq_kms_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r) {
+		return r;
+	}
+	if (rdev->family == CHIP_R420)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r420_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(R400_GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(R300_GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
+#else
+	return 0;
+#endif
+}


Property changes on: trunk/sys/dev/drm2/radeon/r420.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r420_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r420_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r420_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,46 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r420_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned r420_reg_safe_bm[159] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0x00000000, 0x00000100, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0xFF800000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99,
+};
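
Each bit in this table covers one dword register (byte offset >> 2); a set bit
marks the register as writable from a user-space command stream. A minimal
sketch of how such a bitmap is typically consulted, assuming a byte offset
reg into register space:

	/* Sketch: test whether a register may be written from a user
	 * command stream.  bm is r420_reg_safe_bm, n its element count;
	 * reg is a byte offset, so reg >> 2 is the dword index and each
	 * 32-bit word of the bitmap covers 128 bytes of register space. */
	static int reg_is_safe(const unsigned *bm, unsigned n, unsigned reg)
	{
		unsigned word = reg >> 7;               /* (reg >> 2) / 32 */
		unsigned mask = 1u << ((reg >> 2) & 31);
		return word < n && (bm[word] & mask) != 0;
	}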


Property changes on: trunk/sys/dev/drm2/radeon/r420_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r420d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r420d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r420d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,253 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R420D_H
+#define R420D_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r420d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define R_0001F8_MC_IND_INDEX                        0x0001F8
+#define   S_0001F8_MC_IND_ADDR(x)                      (((x) & 0x7F) << 0)
+#define   G_0001F8_MC_IND_ADDR(x)                      (((x) >> 0) & 0x7F)
+#define   C_0001F8_MC_IND_ADDR                         0xFFFFFF80
+#define   S_0001F8_MC_IND_WR_EN(x)                     (((x) & 0x1) << 8)
+#define   G_0001F8_MC_IND_WR_EN(x)                     (((x) >> 8) & 0x1)
+#define   C_0001F8_MC_IND_WR_EN                        0xFFFFFEFF
+#define R_0001FC_MC_IND_DATA                         0x0001FC
+#define   S_0001FC_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_0001FC_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0001FC_MC_IND_DATA                         0x00000000
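
The S_/G_/C_ triplets follow one pattern throughout this header: S_ shifts a
field value into place, G_ extracts it, and C_ is the AND-mask that clears it.
A minimal read-modify-write sketch, assuming the driver's RREG32/WREG32 MMIO
helpers and a field value addr:

	/* Sketch: point the MC indirect index at 'addr' with writes enabled. */
	uint32_t tmp = RREG32(R_0001F8_MC_IND_INDEX);
	tmp &= C_0001F8_MC_IND_ADDR;          /* clear the address field */
	tmp |= S_0001F8_MC_IND_ADDR(addr);    /* install the new address */
	tmp |= S_0001F8_MC_IND_WR_EN(1);      /* enable the indirect write */
	WREG32(R_0001F8_MC_IND_INDEX, tmp);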
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
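
These G_ helpers decode the busy bits that r420_resume() prints when a reset
fails. A small polling sketch, assuming the driver's RREG32 and DRM_UDELAY
helpers (real code would bound the loop with a timeout):

	/* Sketch: poll RBBM_STATUS until the GUI engine goes idle. */
	while (G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
		DRM_UDELAY(1);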
+
+/* CLK registers */
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r420d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r500_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r500_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r500_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,803 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r500_reg.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE				0x4288
+#       define R300_FRONT_PTYPE_POINT                   (0 << 4)
+#       define R300_FRONT_PTYPE_LINE                    (1 << 4)
+#       define R300_FRONT_PTYPE_TRIANGE                 (2 << 4)
+#       define R300_BACK_PTYPE_POINT                    (0 << 7)
+#       define R300_BACK_PTYPE_LINE                     (1 << 7)
+#       define R300_BACK_PTYPE_TRIANGE                  (2 << 7)
+#define R300_GA_ROUND_MODE				0x428c
+#       define R300_GEOMETRY_ROUND_TRUNC                (0 << 0)
+#       define R300_GEOMETRY_ROUND_NEAREST              (1 << 0)
+#       define R300_COLOR_ROUND_TRUNC                   (0 << 2)
+#       define R300_COLOR_ROUND_NEAREST                 (1 << 2)
+#define R300_GB_MSPOS0				        0x4010
+#       define R300_MS_X0_SHIFT                         0
+#       define R300_MS_Y0_SHIFT                         4
+#       define R300_MS_X1_SHIFT                         8
+#       define R300_MS_Y1_SHIFT                         12
+#       define R300_MS_X2_SHIFT                         16
+#       define R300_MS_Y2_SHIFT                         20
+#       define R300_MSBD0_Y_SHIFT                       24
+#       define R300_MSBD0_X_SHIFT                       28
+#define R300_GB_MSPOS1				        0x4014
+#       define R300_MS_X3_SHIFT                         0
+#       define R300_MS_Y3_SHIFT                         4
+#       define R300_MS_X4_SHIFT                         8
+#       define R300_MS_Y4_SHIFT                         12
+#       define R300_MS_X5_SHIFT                         16
+#       define R300_MS_Y5_SHIFT                         20
+#       define R300_MSBD1_SHIFT                         24
+
+#define R300_GA_ENHANCE				        0x4274
+#       define R300_GA_DEADLOCK_CNTL                    (1 << 0)
+#       define R300_GA_FASTSYNC_CNTL                    (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT			0x4f18
+#       define R300_ZC_FLUSH                            (1 << 0)
+#       define R300_ZC_FREE                             (1 << 1)
+#       define R300_ZC_FLUSH_ALL                        0x3
+#define R400_GB_PIPE_SELECT             0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R500_SU_REG_DEST                0x42c8
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1U << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT		0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR	0xE70
+#define RADEON_RBBM_CMDFIFO_DATA	0xE74
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION		0x100
+#define		RS690_MC_FB_START_MASK		0x0000FFFF
+#define		RS690_MC_FB_START_SHIFT		0
+#define		RS690_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS690_MC_FB_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_LOCATION	0x101
+#define		RS690_MC_AGP_START_MASK		0x0000FFFF
+#define		RS690_MC_AGP_START_SHIFT	0
+#define		RS690_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS690_MC_AGP_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_BASE		0x102
+#define RS690_MCCFG_AGP_BASE_2		0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER            0x104
+#define RS690_HDP_FB_LOCATION		0x0134
+#define RS690_MC_INDEX				0x78
+#	define RS690_MC_INDEX_MASK		0x1ff
+#	define RS690_MC_INDEX_WR_EN		(1 << 9)
+#	define RS690_MC_INDEX_WR_ACK		0x7f
+#define RS690_MC_DATA				0x7c
+#define RS690_MC_STATUS                         0x90
+#define RS690_MC_STATUS_IDLE                    (1 << 0)
+#define RS480_AGP_BASE_2		0x0164
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1U << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH		0x3A
+#	define RS690_DIS_OUT_OF_PCI_GART_ACCESS	(1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS                         0x0
+#define RS600_MC_STATUS_IDLE                    (1 << 0)
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+#define RS600_MC_STATUS                         0x0
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_FB_LOCATION                    0x4
+#define		RS600_MC_FB_START_MASK		0x0000FFFF
+#define		RS600_MC_FB_START_SHIFT		0
+#define		RS600_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS600_MC_FB_TOP_SHIFT		16
+#define RS600_MC_AGP_LOCATION                   0x5
+#define		RS600_MC_AGP_START_MASK		0x0000FFFF
+#define		RS600_MC_AGP_START_SHIFT	0
+#define		RS600_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS600_MC_AGP_TOP_SHIFT		16
+#define RS600_MC_AGP_BASE                          0x6
+#define RS600_MC_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM		        (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+
+
+#define RV515_MC_FB_LOCATION		0x01
+#define		RV515_MC_FB_START_MASK		0x0000FFFF
+#define		RV515_MC_FB_START_SHIFT		0
+#define		RV515_MC_FB_TOP_MASK		0xFFFF0000
+#define		RV515_MC_FB_TOP_SHIFT		16
+#define RV515_MC_AGP_LOCATION		0x02
+#define		RV515_MC_AGP_START_MASK		0x0000FFFF
+#define		RV515_MC_AGP_START_SHIFT	0
+#define		RV515_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RV515_MC_AGP_TOP_SHIFT		16
+#define RV515_MC_AGP_BASE		0x03
+#define RV515_MC_AGP_BASE_2		0x04
+
+#define R520_MC_FB_LOCATION		0x04
+#define		R520_MC_FB_START_MASK		0x0000FFFF
+#define		R520_MC_FB_START_SHIFT		0
+#define		R520_MC_FB_TOP_MASK		0xFFFF0000
+#define		R520_MC_FB_TOP_SHIFT		16
+#define R520_MC_AGP_LOCATION		0x05
+#define		R520_MC_AGP_START_MASK		0x0000FFFF
+#define		R520_MC_AGP_START_SHIFT		0
+#define		R520_MC_AGP_TOP_MASK		0xFFFF0000
+#define		R520_MC_AGP_TOP_SHIFT		16
+#define R520_MC_AGP_BASE		0x06
+#define R520_MC_AGP_BASE_2		0x07
+
+
+#define AVIVO_MC_INDEX						0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER            0x09
+#define AVIVO_MC_DATA						0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_CNTL          0x5
+#	define RV515_MEM_NUM_CHANNELS_MASK  0x3
+#define R520_MC_CNTL0          0x8
+#	define R520_MEM_NUM_CHANNELS_MASK  (0x3 << 24)
+#	define R520_MEM_NUM_CHANNELS_SHIFT  24
+#	define R520_MC_CHANNEL_SIZE  (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL                              0x000f /* PLL */
+#       define AVIVO_CP_FORCEON                        (1 << 0)
+#define AVIVO_E2_DYN_CNTL                              0x0011 /* PLL */
+#       define AVIVO_E2_FORCEON                        (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL                            0x0013 /* PLL */
+#       define AVIVO_IDCT_FORCEON                      (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL				0x0300
+#       define AVIVO_VGA_VSTATUS_CNTL_MASK                      (3 << 16)
+#define AVIVO_D1VGA_CONTROL					0x0330
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+#       define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL					0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC                             0x400
+#define AVIVO_EXT1_PPLL_REF_DIV                                 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK                             0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL                             0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC                             0x410
+#define AVIVO_EXT2_PPLL_REF_DIV                                 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK                             0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL                             0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV                                   0x430
+#define AVIVO_EXT2_PPLL_FB_DIV                                   0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC                                 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV                                     0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC                                 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV                                     0x444
+
+#define AVIVO_EXT1_PPLL_CNTL                                    0x448
+#define AVIVO_EXT2_PPLL_CNTL                                    0x44c
+
+#define AVIVO_P1PLL_CNTL                                        0x450
+#define AVIVO_P2PLL_CNTL                                        0x454
+#define AVIVO_P1PLL_INT_SS_CNTL                                 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL                                 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL                                  0x460
+#define AVIVO_P2PLL_LVTMA_CNTL                                  0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL                                   0x480
+#define AVIVO_PCLK_CRTC2_CNTL                                   0x484
+
+#define AVIVO_D1CRTC_H_TOTAL					0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END                          0x6004
+#define AVIVO_D1CRTC_H_SYNC_A                                   0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL                              0x600c
+#define AVIVO_D1CRTC_H_SYNC_B                                   0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL                              0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL					0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END                          0x6024
+#define AVIVO_D1CRTC_V_SYNC_A                                   0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL                              0x602c
+#define AVIVO_D1CRTC_V_SYNC_B                                   0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL                              0x6034
+
+#define AVIVO_D1CRTC_CONTROL                                    0x6080
+#       define AVIVO_CRTC_EN                                    (1 << 0)
+#       define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE             (1 << 24)
+#define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STATUS                                     0x609c
+#       define AVIVO_D1CRTC_V_BLANK                             (1 << 0)
+#define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
+#define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
+#define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
+
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK                         0x60e0
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK                                0x60e8
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
+
+#define AVIVO_D1GRPH_ENABLE                                     0x6100
+#define AVIVO_D1GRPH_CONTROL                                    0x6104
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP                  (0 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP                 (1 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP                 (2 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP                 (3 << 0)
+
+#       define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED                (0 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_RGB565                (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444              (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_AI88                  (3 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_MONO16                (4 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010           (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL               (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010        (3 << 8)
+
+
+#       define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616          (0 << 8)
+
+#       define AVIVO_D1GRPH_SWAP_RB                             (1 << 16)
+#       define AVIVO_D1GRPH_TILED                               (1 << 20)
+#       define AVIVO_D1GRPH_MACRO_ADDRESS_MODE                  (1 << 21)
+
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa.  This applies to GRPH, CUR, etc.
+ */
+#define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6114
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x611c
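
Because of that swap, the R700_D1*/D2* _HIGH names above already point at the
opposite block, so callers can keep selecting by CRTC index. A hypothetical
sketch, with crtc_id as an assumed CRTC index variable:

	/* Sketch: the define names absorb the D1/D2 swap, so selection
	 * stays a plain choice by CRTC id. */
	uint32_t high = (crtc_id == 0) ?
	    R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH :
	    R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;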
+#define AVIVO_D1GRPH_PITCH                                      0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X                           0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y                           0x6128
+#define AVIVO_D1GRPH_X_START                                    0x612c
+#define AVIVO_D1GRPH_Y_START                                    0x6130
+#define AVIVO_D1GRPH_X_END                                      0x6134
+#define AVIVO_D1GRPH_Y_END                                      0x6138
+#define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
+#       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
+
+#define AVIVO_D1CUR_CONTROL                     0x6400
+#       define AVIVO_D1CURSOR_EN                (1 << 0)
+#       define AVIVO_D1CURSOR_MODE_SHIFT        8
+#       define AVIVO_D1CURSOR_MODE_MASK         (3 << 8)
+#       define AVIVO_D1CURSOR_MODE_24BPP        2
+#define AVIVO_D1CUR_SURFACE_ADDRESS             0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH         0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH         0x640c
+#define AVIVO_D1CUR_SIZE                        0x6410
+#define AVIVO_D1CUR_POSITION                    0x6414
+#define AVIVO_D1CUR_HOT_SPOT                    0x6418
+#define AVIVO_D1CUR_UPDATE                      0x6424
+#       define AVIVO_D1CURSOR_UPDATE_LOCK       (1 << 16)
+
+#define AVIVO_DC_LUT_RW_SELECT                  0x6480
+#define AVIVO_DC_LUT_RW_MODE                    0x6484
+#define AVIVO_DC_LUT_RW_INDEX                   0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR                  0x648c
+#define AVIVO_DC_LUT_PWL_DATA                   0x6490
+#define AVIVO_DC_LUT_30_COLOR                   0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT           0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK              0x649c
+#define AVIVO_DC_LUT_AUTOFILL                   0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL                   0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE         0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN        0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED          0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE         0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN        0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED          0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT                0x6520
+#       define AVIVO_DC_LB_MEMORY_SPLIT_MASK    0x3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT   0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF  0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q    1
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY        2
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q    3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+#       define AVIVO_DC_LB_DISP1_END_ADR_SHIFT  4
+#       define AVIVO_DC_LB_DISP1_END_ADR_MASK   0x7ff
+
+#define AVIVO_D1MODE_DATA_FORMAT                0x6528
+#       define AVIVO_D1MODE_INTERLEAVE_EN       (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT             0x652C
+#define AVIVO_D1MODE_VBLANK_STATUS              0x6534
+#       define AVIVO_VBLANK_ACK                 (1 << 4)
+#define AVIVO_D1MODE_VLINE_START_END            0x6538
+#define AVIVO_D1MODE_VLINE_STATUS               0x653c
+#       define AVIVO_D1MODE_VLINE_STAT          (1 << 12)
+#define AVIVO_DxMODE_INT_MASK                   0x6540
+#       define AVIVO_D1MODE_INT_MASK            (1 << 0)
+#       define AVIVO_D2MODE_INT_MASK            (1 << 8)
+#define AVIVO_D1MODE_VIEWPORT_START             0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE              0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM    0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE               0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL		0x6594
+#define AVIVO_D1SCL_UPDATE                      0x65cc
+#       define AVIVO_D1SCL_UPDATE_LOCK          (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL					0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END                          0x6804
+#define AVIVO_D2CRTC_H_SYNC_A                                   0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL                              0x680c
+#define AVIVO_D2CRTC_H_SYNC_B                                   0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL                              0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL					0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END                          0x6824
+#define AVIVO_D2CRTC_V_SYNC_A                                   0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL                              0x682c
+#define AVIVO_D2CRTC_V_SYNC_B                                   0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL                              0x6834
+
+#define AVIVO_D2CRTC_CONTROL                                    0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION                            0x68a0
+#define AVIVO_D2CRTC_FRAME_COUNT                                0x68a4
+#define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
+
+#define AVIVO_D2GRPH_ENABLE                                     0x6900
+#define AVIVO_D2GRPH_CONTROL                                    0x6904
+#define AVIVO_D2GRPH_LUT_SEL                                    0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS                    0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS                  0x6918
+#define AVIVO_D2GRPH_PITCH                                      0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X                           0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y                           0x6928
+#define AVIVO_D2GRPH_X_START                                    0x692c
+#define AVIVO_D2GRPH_Y_START                                    0x6930
+#define AVIVO_D2GRPH_X_END                                      0x6934
+#define AVIVO_D2GRPH_Y_END                                      0x6938
+#define AVIVO_D2GRPH_UPDATE                                     0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL                               0x6948
+
+#define AVIVO_D2CUR_CONTROL                     0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS             0x6c08
+#define AVIVO_D2CUR_SIZE                        0x6c10
+#define AVIVO_D2CUR_POSITION                    0x6c14
+
+#define AVIVO_D2MODE_VBLANK_STATUS              0x6d34
+#define AVIVO_D2MODE_VLINE_START_END            0x6d38
+#define AVIVO_D2MODE_VLINE_STATUS               0x6d3c
+#define AVIVO_D2MODE_VIEWPORT_START             0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE              0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM    0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE               0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL		0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL				0x7214
+
+#define AVIVO_DACA_ENABLE					0x7800
+#	define AVIVO_DAC_ENABLE				(1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT				0x7804
+#       define AVIVO_DAC_SOURCE_CRTC1                   (0 << 0)
+#       define AVIVO_DAC_SOURCE_CRTC2                   (1 << 0)
+#       define AVIVO_DAC_SOURCE_TV                      (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL				0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACA_POWERDOWN					0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_DACB_ENABLE					0x7a00
+#define AVIVO_DACB_SOURCE_SELECT				0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL				0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACB_POWERDOWN					0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED                               (1 << 24) /* by analogy with DACA */
+
+#define AVIVO_TMDSA_CNTL                    0x7880
+#   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_TMDSA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_TMDSA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT				0x7884
+/* 78a8 appears to be some kind of (reasonably tolerant) clock?
+ * 78d0 definitely hits the transmitter, definitely clock. */
+/* MYSTERY1 This appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL		0x7894
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL                  0x78d0
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE            0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE              0x7904
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE          (1 << 0)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE          (1 << 8)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK  (1 << 16)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL				0x7910
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE	(1 << 0)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET	(1 << 1)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT	(2)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL	        (1 << 4)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP          (1 << 5)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	(1 << 6)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK	        (1 << 8)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	(1 << 13)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK	        (1 << 14)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	(1 << 15)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL	(1 << 28)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA     (1 << 29)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL	(1U << 31)
+
+#define AVIVO_LVTMA_CNTL					0x7a80
+#   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_LVTMA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_LVTMA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT                               0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT                                0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL                           0x7a94
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL                  0x7ad0
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define R500_LVTMA_CLOCK_ENABLE			0x7b00
+#define R600_LVTMA_CLOCK_ENABLE			0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE              0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE              0x7b08
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN            (1 << 5)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN             (1 << 9)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL			        0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL			        0x7b14
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE	  (1 << 0)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET	  (1 << 1)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL	          (1 << 4)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP            (1 << 5)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	  (1 << 6)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK	          (1 << 8)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	  (1 << 13)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK	          (1 << 14)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	  (1 << 15)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT  (16)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL	  (1 << 28)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA       (1 << 29)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1U << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL						0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL						0x7af4
+#	define AVIVO_LVTMA_PWRSEQ_EN					    (1 << 0)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK			    (1 << 2)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK			    (1 << 3)
+#	define AVIVO_LVTMA_PWRSEQ_TARGET_STATE				    (1 << 4)
+#	define AVIVO_LVTMA_SYNCEN					    (1 << 8)
+#	define AVIVO_LVTMA_SYNCEN_OVRD					    (1 << 9)
+#	define AVIVO_LVTMA_SYNCEN_POL					    (1 << 10)
+#	define AVIVO_LVTMA_DIGON					    (1 << 16)
+#	define AVIVO_LVTMA_DIGON_OVRD					    (1 << 17)
+#	define AVIVO_LVTMA_DIGON_POL					    (1 << 18)
+#	define AVIVO_LVTMA_BLON						    (1 << 24)
+#	define AVIVO_LVTMA_BLON_OVRD					    (1 << 25)
+#	define AVIVO_LVTMA_BLON_POL					    (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE                        0x7af4
+#define R600_LVTMA_PWRSEQ_STATE                        0x7af8
+#       define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R          (1 << 0)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DIGON                   (1 << 1)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN                  (1 << 2)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_BLON                    (1 << 3)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DONE                    (1 << 4)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT            (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL			0x7af8
+#	define AVIVO_LVDS_BACKLIGHT_CNTL_EN			(1 << 0)
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK		0x0000ff00
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT		8
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
+
+#define AVIVO_DC_GPIO_HPD_A                 0x7e94
+#define AVIVO_DC_GPIO_HPD_Y                 0x7e9c
+
+#define AVIVO_DC_I2C_STATUS1				0x7d30
+#	define AVIVO_DC_I2C_DONE			(1 << 0)
+#	define AVIVO_DC_I2C_NACK			(1 << 1)
+#	define AVIVO_DC_I2C_HALT			(1 << 2)
+#	define AVIVO_DC_I2C_GO			        (1 << 3)
+#define AVIVO_DC_I2C_RESET 				0x7d34
+#	define AVIVO_DC_I2C_SOFT_RESET			(1 << 0)
+#	define AVIVO_DC_I2C_ABORT			(1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 				0x7d38
+#	define AVIVO_DC_I2C_START			(1 << 0)
+#	define AVIVO_DC_I2C_STOP			(1 << 1)
+#	define AVIVO_DC_I2C_RECEIVE			(1 << 2)
+#	define AVIVO_DC_I2C_EN			        (1 << 8)
+#	define AVIVO_DC_I2C_PIN_SELECT(x)		((x) << 16)
+#	define AVIVO_SEL_DDC1			        0
+#	define AVIVO_SEL_DDC2			        1
+#	define AVIVO_SEL_DDC3			        2
+#define AVIVO_DC_I2C_CONTROL2 				0x7d3c
+#	define AVIVO_DC_I2C_ADDR_COUNT(x)		((x) << 0)
+#	define AVIVO_DC_I2C_DATA_COUNT(x)		((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 				0x7d40
+#	define AVIVO_DC_I2C_DATA_DRIVE_EN		(1 << 0)
+#	define AVIVO_DC_I2C_DATA_DRIVE_SEL		(1 << 1)
+#	define AVIVO_DC_I2C_CLK_DRIVE_EN		(1 << 7)
+#	define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x)      ((x) << 8)
+#	define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x)	((x) << 16)
+#	define AVIVO_DC_I2C_TIME_LIMIT(x)		((x) << 24)
+#define AVIVO_DC_I2C_DATA 				0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 			0x7d48
+#	define AVIVO_DC_I2C_INTERRUPT_STATUS		(1 << 0)
+#	define AVIVO_DC_I2C_INTERRUPT_AK		(1 << 8)
+#	define AVIVO_DC_I2C_INTERRUPT_ENABLE		(1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 			0x7d50
+#	define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C		(1 << 0)
+#	define AVIVO_DC_I2C_SW_CAN_USE_I2C		(1 << 1)
+#	define AVIVO_DC_I2C_SW_DONE_USING_I2C		(1 << 8)
+#	define AVIVO_DC_I2C_HW_NEEDS_I2C		(1 << 9)
+#	define AVIVO_DC_I2C_ABORT_HDCP_I2C		(1 << 16)
+#	define AVIVO_DC_I2C_HW_USING_I2C		(1 << 17)
+
+#define AVIVO_DC_GPIO_DDC1_MASK 		        0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 		                0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 		                0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 		                0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 		        0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 		                0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 		                0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 		                0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 		        0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 		                0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 		                0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 		                0x7e6c
+
+#define AVIVO_DISP_INTERRUPT_STATUS                             0x7edc
+#       define AVIVO_D1_VBLANK_INTERRUPT                        (1 << 4)
+#       define AVIVO_D2_VBLANK_INTERRUPT                        (1 << 5)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r500_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
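
A note on the DC_I2C block defined in r500_reg.h above: a transaction
is armed through AVIVO_DC_I2C_CONTROL1..3 and the GO bit, after which
AVIVO_DC_I2C_STATUS1 is polled until AVIVO_DC_I2C_DONE rises, with
AVIVO_DC_I2C_NACK checked on completion.  A minimal sketch of the
polling half, assuming the RREG32() and DRM_UDELAY() accessors used
elsewhere in this commit; the poll budget is arbitrary and the helper
is illustrative, not part of this commit:

    static int avivo_i2c_wait_done(struct radeon_device *rdev)
    {
            uint32_t status;
            int i;

            for (i = 0; i < 200; i++) {     /* arbitrary poll budget */
                    status = RREG32(AVIVO_DC_I2C_STATUS1);
                    if (status & AVIVO_DC_I2C_DONE)
                            return (status & AVIVO_DC_I2C_NACK) ? -EIO : 0;
                    DRM_UDELAY(10);
            }
            return -ETIMEDOUT;      /* engine never signalled DONE */
    }
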
Added: trunk/sys/dev/drm2/radeon/r520.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r520.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r520.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,331 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r520.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r520d.h"
+
+/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R520_MC_STATUS);
+		if (tmp & R520_MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r520_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	rv515_vga_render_disable(rdev);
+	/*
+	 * DST_PIPE_CONFIG		0x170C
+	 * GB_TILE_CONFIG		0x4018
+	 * GB_FIFO_SIZE			0x4024
+	 * GB_PIPE_SELECT		0x402C
+	 * GB_PIPE_SELECT2              0x4124
+	 *	Z_PIPE_SHIFT			0
+	 *	Z_PIPE_MASK			0x000000003
+	 * GB_FIFO_SIZE2                0x4128
+	 *	SC_SFIFO_SIZE_SHIFT		0
+	 *	SC_SFIFO_SIZE_MASK		0x000000003
+	 *	SC_MFIFO_SIZE_SHIFT		2
+	 *	SC_MFIFO_SIZE_MASK		0x00000000C
+	 *	FG_SFIFO_SIZE_SHIFT		4
+	 *	FG_SFIFO_SIZE_MASK		0x000000030
+	 *	ZB_MFIFO_SIZE_SHIFT		6
+	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
+	 * GA_ENHANCE			0x4274
+	 * SU_REG_DEST			0x42C8
+	 */
+	/* workaround for RV530 */
+	if (rdev->family == CHIP_RV530) {
+		WREG32(0x4128, 0xFF);
+	}
+	r420_pipes_init(rdev);
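+	/*
+	 * Combine the current pipe select (DST_PIPE_CONFIG bits 3:2,
+	 * turned into a one-hot bit below) with GB_PIPE_SELECT bits
+	 * 11:8 and write the pair to PLL register 0x000D.
+	 */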
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r520_mc_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait for MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(R520_MC_CNTL0);
+	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+	case 0:
+		rdev->mc.vram_width = 32;
+		break;
+	case 1:
+		rdev->mc.vram_width = 64;
+		break;
+	case 2:
+		rdev->mc.vram_width = 128;
+		break;
+	case 3:
+		rdev->mc.vram_width = 256;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
+	if (tmp & R520_MC_CHANNEL_SIZE)
+		rdev->mc.vram_width *= 2;
+}
+
+static void r520_mc_init(struct radeon_device *rdev)
+{
+
+	r520_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+static void r520_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stop all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (r520_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timeout waiting for MC idle before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program the MC; the address space should be limited to 32 bits */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000005_MC_AGP_LOCATION,
+			S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000007_AGP_BASE_2,
+			S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000006_AGP_BASE, 0);
+		WREG32_MC(R_000007_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int r520_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r520_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r520_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r520_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r520_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disable VGA; need to use the VGA request mechanism */
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for R520 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. Posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r520_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r520.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
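
A note on r520_vram_get_type() above: the switch is a plain
channel-count decode, width = 32 << channels (out-of-range values fall
back to 128), doubled when R520_MC_CHANNEL_SIZE is set.  An equivalent
sketch, illustrative only and not part of this commit:

    static unsigned r520_vram_width(uint32_t mc_cntl0)
    {
            unsigned chans = (mc_cntl0 & R520_MEM_NUM_CHANNELS_MASK) >>
                R520_MEM_NUM_CHANNELS_SHIFT;
            /* 0 -> 32, 1 -> 64, 2 -> 128, 3 -> 256 bits */
            unsigned width = (chans <= 3) ? (32U << chans) : 128U;

            if (mc_cntl0 & R520_MC_CHANNEL_SIZE)    /* doubled channels */
                    width *= 2;
            return width;
    }
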
Added: trunk/sys/dev/drm2/radeon/r520d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r520d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r520d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,191 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R520D_H__
+#define __R520D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r520d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* Registers */
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r520d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
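
A note on the macro convention in r520d.h above: every field comes as
an S_/G_/C_ triplet, where S_xxx_FIELD(v) shifts a value into position,
G_xxx_FIELD(r) extracts it, and C_xxx_FIELD is the register mask with
the field cleared.  A read-modify-write of one field therefore composes
as below; the sketch mirrors the WREG32_MC() usage in r520_mc_program(),
and the helper name is made up for illustration:

    static void r520_set_fb_top(struct radeon_device *rdev, uint32_t top)
    {
            uint32_t loc = RREG32_MC(R_000004_MC_FB_LOCATION);

            loc &= C_000004_MC_FB_TOP;          /* clear the old field */
            loc |= S_000004_MC_FB_TOP(top);     /* insert the new value */
            WREG32_MC(R_000004_MC_FB_LOCATION, loc);
    }
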
Added: trunk/sys/dev/drm2/radeon/r600.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,4397 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_mode.h"
+#include "r600d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define ARUBA_RLC_UCODE_SIZE 1536
+
+#ifdef __linux__
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+#endif
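+
+/*
+ * FreeBSD note: MODULE_FIRMWARE() is Linux-only metadata; in this tree
+ * the same blobs are expected to be obtained through firmware(9)
+ * (the radeonkmsfw modules).
+ */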
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev);
+
+/* r600,rv610,rv630,rv620,rv635,rv670 */
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+#endif
+static void r600_gpu_init(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+void r600_fini(struct radeon_device *rdev);
+#endif
+void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+
+/* get temperature in millidegrees */
+int rv6xx_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
+
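+	/*
+	 * ASIC_T is sign-extended from bit 8 (a 9-bit two's-complement
+	 * reading): e.g. a raw value of 0x1F0 decodes to 0xF0 - 256 = -16.
+	 */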
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
+}
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	/* power state array is low to high, default is first */
+	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
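+		/*
+		 * IGP chips and R600 pick among whole power states only;
+		 * other asics also choose a clock mode (see the else branch).
+		 */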
+		int min_power_state_index = 0;
+
+		if (rdev->pm.num_power_states > 2)
+			min_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_power_state_index = min_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.current_power_state_index == min_power_state_index) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_downclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = 0; i < rdev->pm.num_power_states; i++) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i >= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else {
+					if (rdev->pm.current_power_state_index == 0)
+						rdev->pm.requested_power_state_index =
+							rdev->pm.num_power_states - 1;
+					else
+						rdev->pm.requested_power_state_index =
+							rdev->pm.current_power_state_index - 1;
+				}
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			/* don't use this power state if crtcs are active and its NO_DISPLAY flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_power_state_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_upclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i <= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index + 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for undefined action\n");
+			return;
+		}
+	} else {
+		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
+		/* for now just select the first power state and switch between clock modes */
+		/* power state array is low to high, default is first (0) */
+		if (rdev->pm.active_crtc_count > 1) {
+			rdev->pm.requested_power_state_index = -1;
+			/* start at 1 as we don't want the default mode */
+			for (i = 1; i < rdev->pm.num_power_states; i++) {
+				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+					continue;
+				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+					rdev->pm.requested_power_state_index = i;
+					break;
+				}
+			}
+			/* if nothing selected, grab the default state. */
+			if (rdev->pm.requested_power_state_index == -1)
+				rdev->pm.requested_power_state_index = 0;
+		} else
+			rdev->pm.requested_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index == 0) {
+					rdev->pm.requested_clock_mode_index = 0;
+					rdev->pm.dynpm_can_downclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index - 1;
+			} else {
+				rdev->pm.requested_clock_mode_index = 0;
+				rdev->pm.dynpm_can_downclock = false;
+			}
+			/* don't use this power state if crtcs are active and its NO_DISPLAY flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_clock_mode_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index ==
+				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+					rdev->pm.dynpm_can_upclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index + 1;
+			} else {
+				rdev->pm.requested_clock_mode_index =
+					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+				rdev->pm.dynpm_can_upclock = false;
+			}
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for undefined action\n");
+			return;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
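+	/*
+	 * Each profile entry maps the dpms-on and dpms-off cases to a
+	 * power state index (ps_idx) and a clock mode index (cm_idx).
+	 */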
+	if (rdev->pm.num_power_states == 2) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else if (rdev->pm.num_power_states == 3) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	}
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	if (rdev->family == CHIP_R600) {
+		/* XXX */
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		if (rdev->pm.num_power_states < 4) {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		} else {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		}
+	}
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
+}
+
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_4:
+			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_5:
+			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_6:
+			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r600_hpd_sense(rdev, hpd);
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			tmp = RREG32(DC_HPD4_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD4_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_5:
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_6:
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* Don't try to enable hpd on eDP or LVDS; this avoids
+			 * breaking the aux dp channel on iMacs and helps (but
+			 * does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 */
+			continue;
+		}
+		if (ASIC_IS_DCE3(rdev)) {
+			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+			if (ASIC_IS_DCE32(rdev))
+				tmp |= DC_HPDx_EN;
+
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, tmp);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, tmp);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, tmp);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, tmp);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, tmp);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, tmp);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			default:
+				break;
+			}
+		}
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (ASIC_IS_DCE3(rdev)) {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, 0);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, 0);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, 0);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		}
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * R600 PCIE GART
+ */
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	/* flush hdp cache so updates hit vram */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    !(rdev->flags & RADEON_IS_AGP)) {
+		volatile uint32_t *ptr = rdev->gart.ptr;
+		u32 tmp;
+
+		/* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb
+		 * read rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
+		 * The new method seems to cause problems on some AGP cards,
+		 * so just use the old method for them.
+		 */
+		WREG32(HDP_DEBUG1, 0);
+		tmp = *ptr;
+	} else
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
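+	/* Poll the response field below: a nonzero value means the MC
+	 * answered, with 2 indicating the flush failed.
+	 */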
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read the request/response register */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			DRM_ERROR("[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
+int r600_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		DRM_ERROR("R600 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
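+	/* Eight bytes per GPU page: r6xx GART page table entries are 64 bits. */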
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
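+	/* Only VM context 0 backs the GART; clear the remaining contexts. */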
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Disable L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup L1 TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r600_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+static void r600_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* check the MC busy bits in SRBM_STATUS */
+		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void r600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lockout access through VGA aperture (doesn't exist before R600) */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
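+	/* FB_LOCATION packs the VRAM range in 16MB units: end in the upper
+	 * 16 bits, start in the lower 16 bits.
+	 */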
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address as it appears in
+ * the CPU (PCI) address space, as some GPUs seem to have issues when it is
+ * reprogrammed to a different address.
+ *
+ * If there is not enough space to fit the non-visible VRAM after the
+ * aperture, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
+ * need them to be contiguous from the GPU's point of view so that we can
+ * program the GPU to catch accesses outside of them (weird GPU policy, see ??).
+ *
+ * This function never fails; the worst case is limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on AGP platforms.
+ */
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n",
+				(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
+				(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
+	} else {
+		u64 base = 0;
+		if (rdev->flags & RADEON_IS_IGP) {
+			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+			base <<= 24;
+		}
+		radeon_vram_location(rdev, &rdev->mc, base);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
+static int r600_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM informations */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r600_vram_gtt_location(rdev, &rdev->mc);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rs690_pm_info(rdev);
+		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	}
+	radeon_update_bandwidth_info(rdev);
+	return 0;
+}
+
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+	void *vram_scratch_ptr_ptr; /* FreeBSD: to please GCC 4.2. */
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0)) {
+		radeon_bo_unref(&rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		radeon_bo_unref(&rdev->vram_scratch.robj);
+		return r;
+	}
+	vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr;
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+				vram_scratch_ptr_ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+	if (r)
+		radeon_bo_unref(&rdev->vram_scratch.robj);
+
+	return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
+/* We don't check whether the GPU really needs a reset; we simply do the
+ * reset. It's up to the caller to determine if the GPU needs one. We
+ * might add a helper function to check that.
+ */
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
+{
+	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
+				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
+				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
+				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
+				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
+				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
+				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
+				S_008010_GUI_ACTIVE(1);
+	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
+			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
+			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
+			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
+			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
+			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
+			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
+			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
+	u32 tmp;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return;
+
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
+		RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
+		RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
+		RREG32(R_000E50_SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* Check if any of the rendering block is busy and reset it */
+	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
+	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
+		tmp = S_008020_SOFT_RESET_CR(1) |
+			S_008020_SOFT_RESET_DB(1) |
+			S_008020_SOFT_RESET_CB(1) |
+			S_008020_SOFT_RESET_PA(1) |
+			S_008020_SOFT_RESET_SC(1) |
+			S_008020_SOFT_RESET_SMX(1) |
+			S_008020_SOFT_RESET_SPI(1) |
+			S_008020_SOFT_RESET_SX(1) |
+			S_008020_SOFT_RESET_SH(1) |
+			S_008020_SOFT_RESET_TC(1) |
+			S_008020_SOFT_RESET_TA(1) |
+			S_008020_SOFT_RESET_VC(1) |
+			S_008020_SOFT_RESET_VGT(1);
+		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
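+		/* read back to post the write before delaying */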
+		RREG32(R_008020_GRBM_SOFT_RESET);
+		mdelay(15);
+		WREG32(R_008020_GRBM_SOFT_RESET, 0);
+	}
+	/* Reset CP (we always reset CP) */
+	tmp = S_008020_SOFT_RESET_CP(1);
+	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+	RREG32(R_008020_GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(R_008020_GRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
+		RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
+		RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
+		RREG32(R_000E50_SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct rv515_mc_save save;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		reset_mask &= ~RADEON_RESET_DMA;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		r600_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		r600_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	mdelay(1);
+
+	rv515_mc_resume(rdev, &save);
+	return 0;
+}
+
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status2;
+
+	srbm_status = RREG32(R_000E50_SRBM_STATUS);
+	grbm_status = RREG32(R_008010_GRBM_STATUS);
+	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+	if (!G_008010_GUI_ACTIVE(grbm_status)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	dma_status_reg = RREG32(DMA_STATUS_REG);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+	return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					  RADEON_RESET_COMPUTE |
+					  RADEON_RESET_DMA));
+}
+
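+/* Illustrative example (not from the original source): with
+ * tiling_pipe_num = 2 (four pipes), max_rb_num = 2, total_max_rb_num = 8
+ * and no RBs disabled, the mask fixup below yields disabled_rb_mask = 0xfc,
+ * so req_rb_num = 2 and pipe_rb_ratio = 4 / 2 = 2.  The loop then packs
+ * the RB indices 1, 1, 0, 0 into 2-bit fields, producing data = 0x50.
+ */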
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+			      u32 tiling_pipe_num,
+			      u32 max_rb_num,
+			      u32 total_max_rb_num,
+			      u32 disabled_rb_mask)
+{
+	u32 rendering_pipe_num, rb_num_width, req_rb_num;
+	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
+	u32 data = 0, mask = 1 << (max_rb_num - 1);
+	unsigned i, j;
+
+	/* mask out the RBs that don't exist on that asic */
+	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+	/* make sure at least one RB is available */
+	if ((tmp & 0xff) != 0xff)
+		disabled_rb_mask = tmp;
+
+	rendering_pipe_num = 1 << tiling_pipe_num;
+	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+	KASSERT(rendering_pipe_num >= req_rb_num, ("rendering_pipe_num < req_rb_num"));
+
+	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+	if (rdev->family <= CHIP_RV740) {
+		/* r6xx/r7xx */
+		rb_num_width = 2;
+	} else {
+		/* eg+ */
+		rb_num_width = 4;
+	}
+
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(mask & disabled_rb_mask)) {
+			for (j = 0; j < pipe_rb_ratio; j++) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+			}
+			if (pipe_rb_remain) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+				pipe_rb_remain--;
+			}
+		}
+		mask >>= 1;
+	}
+
+	return data;
+}
+
+int r600_count_pipe_bits(uint32_t val)
+{
+	return hweight32(val);
+}
+
+static void r600_gpu_init(struct radeon_device *rdev)
+{
+	u32 tiling_config;
+	u32 ramcfg;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 tmp;
+	int i, j;
+	u32 sq_config;
+	u32 sq_gpr_resource_mgmt_1 = 0;
+	u32 sq_gpr_resource_mgmt_2 = 0;
+	u32 sq_thread_resource_mgmt = 0;
+	u32 sq_stack_resource_mgmt_1 = 0;
+	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 disabled_rb_mask;
+
+	rdev->config.r600.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_R600:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 8;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 256;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		rdev->config.r600.max_pipes = 2;
+		rdev->config.r600.max_tile_pipes = 2;
+		rdev->config.r600.max_simds = 3;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->config.r600.max_pipes = 1;
+		rdev->config.r600.max_tile_pipes = 1;
+		rdev->config.r600.max_simds = 2;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 4;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 1;
+		break;
+	case CHIP_RV670:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 4;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 192;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* Setup tiling */
+	tiling_config = 0;
+	ramcfg = RREG32(RAMCFG);
+	switch (rdev->config.r600.max_tile_pipes) {
+	case 1:
+		tiling_config |= PIPE_TILING(0);
+		break;
+	case 2:
+		tiling_config |= PIPE_TILING(1);
+		break;
+	case 4:
+		tiling_config |= PIPE_TILING(2);
+		break;
+	case 8:
+		tiling_config |= PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+
+	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+	if (tmp > 3) {
+		tiling_config |= ROW_TILING(3);
+		tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		tiling_config |= ROW_TILING(tmp);
+		tiling_config |= SAMPLE_SPLIT(tmp);
+	}
+	tiling_config |= BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	tmp = R6XX_MAX_BACKENDS -
+		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+	if (tmp < rdev->config.r600.max_backends) {
+		rdev->config.r600.max_backends = tmp;
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+	tmp = R6XX_MAX_PIPES -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.r600.max_pipes) {
+		rdev->config.r600.max_pipes = tmp;
+	}
+	tmp = R6XX_MAX_SIMDS -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.r600.max_simds) {
+		rdev->config.r600.max_simds = tmp;
+	}
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+					R6XX_MAX_BACKENDS, disabled_rb_mask);
+	tiling_config |= tmp << 16;
+	rdev->config.r600.backend_map = tmp;
+
+	rdev->config.r600.tile_config = tiling_config;
+	WREG32(GB_TILING_CONFIG, tiling_config);
+	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
+
+	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* Setup some CP states */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
+			     SYNC_WALKER | SYNC_ALIGNER));
+	/* Setup various GPU states */
+	if (rdev->family == CHIP_RV670)
+		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
+
+	tmp = RREG32(SX_DEBUG_1);
+	tmp |= SMX_EVENT_RELEASE;
+	if ((rdev->family > CHIP_R600))
+		tmp |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, tmp);
+
+	if (((rdev->family) == CHIP_R600) ||
+	    ((rdev->family) == CHIP_RV630) ||
+	    ((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+	} else {
+		WREG32(DB_DEBUG, 0);
+	}
+	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
+			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(VGT_NUM_INSTANCES, 0);
+
+	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
+
+	tmp = RREG32(SQ_MS_FIFO_SIZES);
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		tmp = (CACHE_FIFO_SIZE(0xa) |
+		       FETCH_FIFO_HIWATER(0xa) |
+		       DONE_FIFO_HIWATER(0xe0) |
+		       ALU_UPDATE_FIFO_HIWATER(0x8));
+	} else if (((rdev->family) == CHIP_R600) ||
+		   ((rdev->family) == CHIP_RV630)) {
+		tmp &= ~DONE_FIFO_HIWATER(0xff);
+		tmp |= DONE_FIFO_HIWATER(0x4);
+	}
+	WREG32(SQ_MS_FIFO_SIZES, tmp);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	if ((rdev->family) == CHIP_R600) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
+					  NUM_VS_GPRS(124) |
+					  NUM_CLAUSE_TEMP_GPRS(4));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
+					  NUM_ES_GPRS(0));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
+					   NUM_VS_THREADS(48) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(4));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
+					    NUM_VS_STACK_ENTRIES(128));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
+					    NUM_ES_STACK_ENTRIES(0));
+	} else if (((rdev->family) == CHIP_RV610) ||
+		   ((rdev->family) == CHIP_RV620) ||
+		   ((rdev->family) == CHIP_RS780) ||
+		   ((rdev->family) == CHIP_RS880)) {
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if (((rdev->family) == CHIP_RV630) ||
+		   ((rdev->family) == CHIP_RV635)) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
+					  NUM_ES_GPRS(18));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if ((rdev->family) == CHIP_RV670) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
+					    NUM_VS_STACK_ENTRIES(64));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
+					    NUM_ES_STACK_ENTRIES(64));
+	}
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
+	} else {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
+	}
+
+	/* More default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
+					 S1_X(0x4) | S1_Y(0xc)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
+					 S1_X(0x2) | S1_Y(0x2) |
+					 S2_X(0xa) | S2_Y(0x6) |
+					 S3_X(0x6) | S3_Y(0xa)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
+					     S1_X(0x4) | S1_Y(0xc) |
+					     S2_X(0x1) | S2_Y(0x6) |
+					     S3_X(0xa) | S3_Y(0xe)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
+					     S5_X(0x0) | S5_Y(0x0) |
+					     S6_X(0xb) | S6_Y(0x4) |
+					     S7_X(0x7) | S7_Y(0x8)));
+
+	WREG32(VGT_STRMOUT_EN, 0);
+	tmp = rdev->config.r600.max_pipes * 16;
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp += 32;
+		break;
+	case CHIP_RV670:
+		tmp += 128;
+		break;
+	default:
+		break;
+	}
+	if (tmp > 256) {
+		tmp = 256;
+	}
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, tmp);
+	WREG32(VGT_GS_PER_VS, 2);
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* Clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp = TC_L2_SIZE(8);
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		tmp = TC_L2_SIZE(4);
+		break;
+	case CHIP_R600:
+		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
+		break;
+	default:
+		tmp = TC_L2_SIZE(0);
+		break;
+	}
+	WREG32(TC_CNTL, tmp);
+
+	tmp = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, tmp);
+
+	tmp = RREG32(ARB_POP);
+	tmp |= ENABLE_TC128;
+	WREG32(ARB_POP, tmp);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+			       NUM_CLIP_SEQ(3)));
+	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+	WREG32(VC_ENHANCE, 0);
+}
+
+
+/*
+ * Indirect register accessors
+ */
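+/* These go through an index/data pair: write the register offset to
+ * PCIE_PORT_INDEX, read it back to post the write, then access the value
+ * through PCIE_PORT_DATA.
+ */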
+u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
+{
+	u32 r;
+
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+}
+
+/*
+ * CP & Ring
+ */
+void r600_cp_stop(struct radeon_device *rdev)
+{
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+int r600_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_R600:
+		chip_name = "R600";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV610:
+		chip_name = "RV610";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV630:
+		chip_name = "RV630";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV620:
+		chip_name = "RV620";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV635:
+		chip_name = "RV635";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV670:
+		chip_name = "RV670";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		chip_name = "RS780";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV770:
+		chip_name = "RV770";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_RV710:
+		chip_name = "RV710";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_CEDAR:
+		chip_name = "CEDAR";
+		rlc_chip_name = "CEDAR";
+		break;
+	case CHIP_REDWOOD:
+		chip_name = "REDWOOD";
+		rlc_chip_name = "REDWOOD";
+		break;
+	case CHIP_JUNIPER:
+		chip_name = "JUNIPER";
+		rlc_chip_name = "JUNIPER";
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_name = "CYPRESS";
+		rlc_chip_name = "CYPRESS";
+		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO:
+		chip_name = "SUMO";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO2:
+		chip_name = "SUMO2";
+		rlc_chip_name = "SUMO";
+		break;
+	default: panic("%s: Unsupported family %d", __func__, rdev->family);
+	}
+
+	if (rdev->family >= CHIP_CEDAR) {
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	} else if (rdev->family >= CHIP_RV770) {
+		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+		me_req_size = R700_PM4_UCODE_SIZE * 4;
+		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
+	} else {
+		pfp_req_size = PFP_UCODE_SIZE * 4;
+		me_req_size = PM4_UCODE_SIZE * 12;
+		rlc_req_size = RLC_UCODE_SIZE * 4;
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+	err = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
+	rdev->pfp_fw = firmware_get(fw_name);
+	if (rdev->pfp_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->pfp_fw->datasize != pfp_req_size) {
+		DRM_ERROR(
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->datasize, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
+	rdev->me_fw = firmware_get(fw_name);
+	if (rdev->me_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->me_fw->datasize != me_req_size) {
+		DRM_ERROR(
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
+	rdev->rlc_fw = firmware_get(fw_name);
+	if (rdev->rlc_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->rlc_fw->datasize != rlc_req_size) {
+		DRM_ERROR(
+		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+out:
+	if (err) {
+		if (err != -EINVAL)
+			DRM_ERROR(
+			       "r600_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		if (rdev->pfp_fw != NULL) {
+			firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+			rdev->pfp_fw = NULL;
+		}
+		if (rdev->me_fw != NULL) {
+			firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+			rdev->me_fw = NULL;
+		}
+		if (rdev->rlc_fw != NULL) {
+			firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+			rdev->rlc_fw = NULL;
+		}
+	}
+	return err;
+}
+
+/**
+ * r600_fini_microcode - drop the firmware image references
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Drop the pfp, me, and rlc firmware image references.
+ * Called at driver shutdown.
+ */
+void r600_fini_microcode(struct radeon_device *rdev)
+{
+
+	if (rdev->pfp_fw != NULL) {
+		firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+		rdev->pfp_fw = NULL;
+	}
+
+	if (rdev->me_fw != NULL) {
+		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+		rdev->me_fw = NULL;
+	}
+
+	if (rdev->rlc_fw != NULL) {
+		firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+		rdev->rlc_fw = NULL;
+	}
+}
+
+static int r600_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r600_cp_stop(rdev);
+
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
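+	/* The ME image is PM4_UCODE_SIZE entries of 3 dwords each, matching
+	 * the me_req_size computed in r600_init_microcode.
+	 */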
+	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+		WREG32(CP_ME_RAM_DATA,
+		       be32_to_cpup(fw_data++));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA,
+		       be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+int r600_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
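+	/* ME_INITIALIZE carries six data dwords; together with the header
+	 * that accounts for the 7 ring dwords locked above.
+	 */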
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	if (rdev->family >= CHIP_RV770) {
+		radeon_ring_write(ring, 0x0);
+		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+	} else {
+		radeon_ring_write(ring, 0x3);
+		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+	}
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	cp_me = 0xff;
+	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
+	return 0;
+}
+
+int r600_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	ring->rptr = RREG32(CP_RB_RPTR);
+
+	r600_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+	return 0;
+}
+
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+{
+	u32 rb_bufsz;
+	int r;
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
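+	/* effectively rounds the size to a supported power-of-two number of bytes */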
+	ring->ring_size = ring_size;
+	ring->align_mask = 16 - 1;
+
+	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+}
+
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = drm_order(ring->ring_size / 4);
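+	/* the ring-size field sits at bit 1 of DMA_RB_CNTL, hence the shift */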
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+	ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+	WREG32(DMA_IB_CNTL, ib_cntl);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+void r600_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	volatile uint32_t *ptr = rdev->vram_scratch.ptr;
+	u32 tmp;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	*ptr = tmp;
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = *ptr;
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
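+
+/*
+ * For reference, a sketch of the DMA write packet emitted above, inferred
+ * from the four ring writes (hedged; the hardware docs are authoritative):
+ *
+ *	dw0: DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw)  header, ndw payload dwords
+ *	dw1: destination address bits [31:2]          (dword aligned)
+ *	dw2: destination address bits [39:32]
+ *	dw3..: payload dwords (0xDEADBEEF here)
+ */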
+
+/*
+ * CP fences/semaphores
+ */
+
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	if (rdev->wb.use_event) {
+		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |
+					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		/* EVENT_WRITE_EOP - flush caches, send int */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+		radeon_ring_write(ring, addr & 0xffffffff);
+		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+		radeon_ring_write(ring, fence->seq);
+		radeon_ring_write(ring, 0);
+	} else {
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |
+					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+		/* wait for 3D idle clean */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+		/* Emit fence sequence & fire IRQ */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, fence->seq);
+		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+		radeon_ring_write(ring, RB_INT_STAT);
+	}
+}
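+
+/*
+ * Sketch of the EVENT_WRITE_EOP operands used above, inferred from how the
+ * driver uses them (hedged; see the r6xx programming docs for the full
+ * encoding): DATA_SEL(1) selects writing a 32-bit value (the fence seq) to
+ * 'addr', and INT_SEL(2) requests an interrupt once that write has
+ * completed, so a single packet both publishes the fence and signals the
+ * CPU after the cache flush.
+ */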
+
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	if (rdev->family < CHIP_CAYMAN)
+		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, lower_32_bits(fence->seq));
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+int r600_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	struct radeon_sa_bo *vb = NULL;
+	int r;
+
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
+	if (r) {
+		return r;
+	}
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+	r600_blit_done_copy(rdev, fence, vb, sem);
+	return 0;
+}
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFE)
+			cur_size_in_dw = 0xFFFE;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
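+
+/*
+ * Worked example of the chunking arithmetic above (illustrative): each
+ * DMA_PACKET_COPY moves at most 0xFFFE dwords, so for num_gpu_pages = 512
+ * (assuming 4 KiB GPU pages) size_in_dw = 512 * 4096 / 4 = 524288 and
+ * num_loops = DIV_ROUND_UP(524288, 0xFFFE) = 9: eight full chunks plus a
+ * final chunk of 16 dwords.  The ring lock reserves num_loops * 4 + 8
+ * dwords: four per copy packet, with the spare eight presumably covering
+ * the semaphore sync and fence packets.
+ */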
+
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	/* FIXME: implement */
+	return 0;
+}
+
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	/* FIXME: implement */
+}
+
+static int r600_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	r600_pcie_gen2_enable(rdev);
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	r600_mc_program(rdev);
+	if (rdev->flags & RADEON_IS_AGP) {
+		r600_agp_enable(rdev);
+	} else {
+		r = r600_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r600_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = r600_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+void r600_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~(1<<0);
+		temp |= (1<<1);
+	} else {
+		temp &= ~(1<<1);
+	}
+	WREG32(CONFIG_CNTL, temp);
+}
+
+int r600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
+	 * posting performs the tasks necessary to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		DRM_ERROR("r600 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int r600_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	r600_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more than
+ * call asic-specific functions. This should also allow us to remove
+ * a bunch of callback functions like vram_info.
+ */
+int r600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (r600_debugfs_mc_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for mc!\n");
+	}
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = r600_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		r600_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the ucode is missing. */
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		DRM_ERROR("radeon: ucode required for R600+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void r600_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	r600_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	r600_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+
+/*
+ * CS stuff
+ */
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
+	if (ring->rptr_save_reg) {
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	int r;
+	volatile uint32_t *ptr = rdev->vram_scratch.ptr;
+	u32 tmp = 0;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	*ptr = tmp;
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = *ptr;
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
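+
+/*
+ * Why (wptr & 7) != 5 above: the indirect-buffer packet is 3 dwords and
+ * must end on an 8-dword boundary, so it has to start at offset 5 mod 8.
+ * Illustrative example: with wptr at 12 (12 & 7 == 4), one NOP brings it
+ * to 13 (13 & 7 == 5); the 3-dword IB packet then occupies dwords 13-15
+ * and ends exactly on the boundary at 16.
+ */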
+
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
+ * the same as the CP ring buffer, but in reverse.  Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and the host consumes.  As the host irq handler processes interrupts, it
+ * increments the rptr.  When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
+
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	u32 rb_bufsz;
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 4);
+	ring_size = (1 << rb_bufsz) * 4;
+	rdev->ih.ring_size = ring_size;
+	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+	rdev->ih.rptr = 0;
+}
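+
+/*
+ * Worked example of the alignment above (illustrative): for the 64 KiB
+ * ring requested by r600_init, ring_size / 4 = 16384 dwords, drm_order
+ * returns 14, and (1 << 14) * 4 = 65536, so the size is already a power
+ * of two and ptr_mask becomes 0xFFFF, letting rptr/wptr wrap with a
+ * simple AND.
+ */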
+
+int r600_ih_ring_alloc(struct radeon_device *rdev)
+{
+	int r;
+	void *ring_ptr; /* FreeBSD: to please GCC 4.2. */
+
+	/* Allocate ring buffer */
+	if (rdev->ih.ring_obj == NULL) {
+		r = radeon_bo_create(rdev, rdev->ih.ring_size,
+				     PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     NULL, &rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_bo_unref(&rdev->ih.ring_obj);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+			radeon_bo_unref(&rdev->ih.ring_obj);
+			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+			return r;
+		}
+		ring_ptr = &rdev->ih.ring;
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
+				   ring_ptr);
+		if (r)
+			radeon_bo_unpin(rdev->ih.ring_obj);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+			radeon_bo_unref(&rdev->ih.ring_obj);
+			return r;
+		}
+	}
+	return 0;
+}
+
+void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+	int r;
+	if (rdev->ih.ring_obj) {
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
+		rdev->ih.ring = NULL;
+		rdev->ih.ring_obj = NULL;
+	}
+}
+
+void r600_rlc_stop(struct radeon_device *rdev)
+{
+	if ((rdev->family >= CHIP_RV770) &&
+	    (rdev->family <= CHIP_RV740)) {
+		/* r7xx asics need to soft reset RLC before halting */
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+		RREG32(SRBM_SOFT_RESET);
+		mdelay(15);
+		WREG32(SRBM_SOFT_RESET, 0);
+		RREG32(SRBM_SOFT_RESET);
+	}
+
+	WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	r600_rlc_stop(rdev);
+
+	WREG32(RLC_HB_CNTL, 0);
+
+	if (rdev->family == CHIP_ARUBA) {
+		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+	}
+	if (rdev->family <= CHIP_CAYMAN) {
+		WREG32(RLC_HB_BASE, 0);
+		WREG32(RLC_HB_RPTR, 0);
+		WREG32(RLC_HB_WPTR, 0);
+	}
+	if (rdev->family <= CHIP_CAICOS) {
+		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+	}
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	if (rdev->family >= CHIP_ARUBA) {
+		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_CAYMAN) {
+		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_CEDAR) {
+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_RV770) {
+		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else {
+		for (i = 0; i < RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	r600_rlc_start(rdev);
+
+	return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+void r600_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		if (ASIC_IS_DCE32(rdev)) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		} else {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else {
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+	}
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	r600_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = r600_rlc_init(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	if (rdev->family >= CHIP_CEDAR)
+		evergreen_disable_interrupt_state(rdev);
+	else
+		r600_disable_interrupt_state(rdev);
+
+	/* at this point everything should be set up correctly to enable master */
+	pci_enable_busmaster(rdev->dev);
+
+	/* enable irqs */
+	r600_enable_interrupts(rdev);
+
+	return ret;
+}
+
+void r600_irq_suspend(struct radeon_device *rdev)
+{
+	r600_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 mode_int = 0;
+	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 hdmi0, hdmi1;
+	u32 d1grph = 0, d2grph = 0;
+	u32 dma_cntl;
+
+	if (!rdev->irq.installed) {
+		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		r600_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	if (ASIC_IS_DCE3(rdev)) {
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		if (ASIC_IS_DCE32(rdev)) {
+			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+		} else {
+			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		}
+	} else {
+		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("r600_irq_set: vblank 0\n");
+		mode_int |= D1MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("r600_irq_set: vblank 1\n");
+		mode_int |= D2MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("r600_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("r600_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("r600_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("r600_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("r600_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("r600_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("r600_irq_set: hdmi 0\n");
+		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("r600_irq_set: hdmi 1\n");
+		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DMA_CNTL, dma_cntl);
+	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DC_HPD1_INT_CONTROL, hpd1);
+		WREG32(DC_HPD2_INT_CONTROL, hpd2);
+		WREG32(DC_HPD3_INT_CONTROL, hpd3);
+		WREG32(DC_HPD4_INT_CONTROL, hpd4);
+		if (ASIC_IS_DCE32(rdev)) {
+			WREG32(DC_HPD5_INT_CONTROL, hpd5);
+			WREG32(DC_HPD6_INT_CONTROL, hpd6);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+		} else {
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+		}
+	} else {
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+	}
+
+	return 0;
+}
+
+static void r600_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		if (ASIC_IS_DCE32(rdev)) {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+		} else {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+		}
+	} else {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
+	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
+		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
+		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (ASIC_IS_DCE32(rdev)) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		}
+	} else {
+		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+			if (ASIC_IS_DCE3(rdev)) {
+				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			} else {
+				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			}
+		}
+	}
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	r600_irq_ack(rdev);
+	r600_disable_interrupt_state(rdev);
+}
+
+static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not-overwritten vector (wptr + 16). Hopefully
+		 * this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/*        r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id  src_data  description
+ *      1         0  D1 Vblank
+ *      1         1  D1 Vline
+ *      5         0  D2 Vblank
+ *      5         1  D2 Vline
+ *     19         0  FP Hot plug detection A
+ *     19         1  FP Hot plug detection B
+ *     19         2  DAC A auto-detection
+ *     19         3  DAC B auto-detection
+ *     21         4  HDMI block A
+ *     21         5  HDMI block B
+ *    176         -  CP_INT RB
+ *    177         -  CP_INT IB1
+ *    178         -  CP_INT IB2
+ *    181         -  EOP Interrupt
+ *    233         -  GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
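+
+/*
+ * Purely illustrative view of one 128-bit IV ring entry as a struct; the
+ * handler below indexes the ring as raw little-endian dwords instead:
+ *
+ *	struct r600_iv_entry {
+ *		u32 src_id;	 // bits [7:0] valid, [31:8] reserved
+ *		u32 src_data;	 // bits [27:0] valid
+ *		u32 reserved[2];
+ *	};
+ *
+ * which is why rptr advances by 16 bytes per processed vector.
+ */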
+
+irqreturn_t r600_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	/* No MSIs, need a dummy read to flush PCI DMAs */
+	if (!rdev->msi_enabled)
+		RREG32(IH_RB_WPTR);
+
+	wptr = r600_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	r600_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 19: /* HPD/DAC hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 10:
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 12:
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 21: /* hdmi */
+			switch (src_data) {
+			case 4:
+				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI0\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI1\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
+	if (queue_hdmi)
+		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = r600_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
+	DREG32_SYS(m, rdev, VM_L2_STATUS);
+	return 0;
+}
+
+static struct drm_info_list r600_mc_info_list[] = {
+	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
+#else
+	return 0;
+#endif
+}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX hardware doesn't seem to take into account an HDP flush
+ * performed through the ring buffer; this leads to corruption in rendering
+ * (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid it we
+ * perform the HDP flush directly by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
+	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
+	 * This seems to cause problems on some AGP cards. Just use the old
+	 * method for them.
+	 */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
+		volatile uint32_t *ptr = rdev->vram_scratch.ptr;
+		u32 tmp;
+
+		WREG32(HDP_DEBUG1, 0);
+		tmp = *ptr;
+	} else
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	u32 link_width_cntl, mask, target_reg;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     R600_PCIE_LC_RENEGOTIATE_EN |
+			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+	link_width_cntl |= mask;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+	/* some northbridges can renegotiate the link rather than requiring
+	 * a complete re-config.
+	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+	 */
+	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+	else
+		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						       RADEON_PCIE_LC_RECONFIG_NOW));
+
+	if (rdev->family >= CHIP_RV770)
+		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+	else
+		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32(target_reg);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32(target_reg);
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+	u16 link_cntl2;
+	u32 mask;
+	int ret;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* only RV6xx+ chips are supported */
+	if (rdev->family <= CHIP_R600)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & DRM_PCIE_SPEED_50))
+		return;
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* 55 nm r6xx asics */
+	if ((rdev->family == CHIP_RV670) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RV635)) {
+		/* advertise upconfig capability */
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+					     LC_RECONFIG_ARC_MISSING_ESCAPE);
+			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		} else {
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		}
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		/* 55 nm r6xx asics */
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			WREG32(MM_CFGREGS_CNTL, 0x8);
+			link_cntl2 = RREG32(0x4088);
+			WREG32(MM_CFGREGS_CNTL, 0);
+			/* not supported yet */
+			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+				return;
+		}
+
+		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+			training_cntl &= ~LC_POINT_7_PLUS_EN;
+			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+		} else {
+			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+		}
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
+
+/**
+ * r600_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	sx_xlock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	sx_xunlock(&rdev->gpu_clock_mutex);
+	return clock;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_audio.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_audio.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_audio.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,259 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_audio.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	}
+	return false;
+}
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+	return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
+		|| rdev->family == CHIP_RS600
+		|| rdev->family == CHIP_RS690
+		|| rdev->family == CHIP_RS740;
+}
+
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
+{
+	struct r600_audio status;
+	uint32_t value;
+
+	value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+
+	/* number of channels */
+	status.channels = (value & 0x7) + 1;
+
+	/* bits per sample */
+	switch ((value & 0xF0) >> 4) {
+	case 0x0:
+		status.bits_per_sample = 8;
+		break;
+	case 0x1:
+		status.bits_per_sample = 16;
+		break;
+	case 0x2:
+		status.bits_per_sample = 20;
+		break;
+	case 0x3:
+		status.bits_per_sample = 24;
+		break;
+	case 0x4:
+		status.bits_per_sample = 32;
+		break;
+	default:
+		dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+			(int)value);
+		status.bits_per_sample = 16;
+	}
+
+	/* current sampling rate in Hz */
+	if (value & 0x4000)
+		status.rate = 44100;
+	else
+		status.rate = 48000;
+	status.rate *= ((value >> 11) & 0x7) + 1;
+	status.rate /= ((value >> 8) & 0x7) + 1;
+
+	value = RREG32(R600_AUDIO_STATUS_BITS);
+
+	/* IEC 60958 status bits */
+	status.status_bits = value & 0xff;
+
+	/* IEC 60958 category code */
+	status.category_code = (value >> 8) & 0xff;
+
+	return status;
+}
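+
+/*
+ * Worked example of the decode above, for a hypothetical register
+ * value of 0x4021 (illustrative only, not captured from hardware):
+ *
+ *	channels   = (0x4021 & 0x7) + 1          = 2
+ *	bps field  = (0x4021 & 0xF0) >> 4        = 0x2 -> 20 bits
+ *	base rate  = bit 14 set                  -> 44100 Hz
+ *	multiplier = ((0x4021 >> 11) & 0x7) + 1  = 1
+ *	divisor    = ((0x4021 >> 8) & 0x7) + 1   = 1
+ *	rate       = 44100 * 1 / 1               = 44100 Hz
+ */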
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+void r600_audio_update_hdmi(void *arg, int pending)
+{
+	struct radeon_device *rdev = arg;
+	struct drm_device *dev = rdev->ddev;
+	struct r600_audio audio_status = r600_audio_status(rdev);
+	struct drm_encoder *encoder;
+	bool changed = false;
+
+	if (rdev->audio_status.channels != audio_status.channels ||
+	    rdev->audio_status.rate != audio_status.rate ||
+	    rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+	    rdev->audio_status.status_bits != audio_status.status_bits ||
+	    rdev->audio_status.category_code != audio_status.category_code) {
+		rdev->audio_status = audio_status;
+		changed = true;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (!radeon_dig_encoder(encoder))
+			continue;
+		if (changed || r600_hdmi_buffer_status_changed(encoder))
+			r600_hdmi_update_audio_settings(encoder);
+	}
+}
+
+/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+	u32 value = 0;
+	DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+	if (ASIC_IS_DCE4(rdev)) {
+		if (enable) {
+			value |= 0x81000000; /* Required to enable audio */
+			value |= 0x0e1000f0; /* fglrx sets that too */
+		}
+		WREG32(EVERGREEN_AUDIO_ENABLE, value);
+	} else {
+		WREG32_P(R600_AUDIO_ENABLE,
+			 enable ? 0x81000000 : 0x0, ~0x81000000);
+	}
+	rdev->audio_enabled = enable;
+}
+
+/*
+ * initialize the audio vars
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+	if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+		return 0;
+
+	r600_audio_engine_enable(rdev, true);
+
+	rdev->audio_status.channels = -1;
+	rdev->audio_status.rate = -1;
+	rdev->audio_status.bits_per_sample = -1;
+	rdev->audio_status.status_bits = 0;
+	rdev->audio_status.category_code = 0;
+
+	return 0;
+}
+
+/*
+ * attach the audio codec to the clock source of the encoder
+ */
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	int base_rate = 48000;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
+		break;
+	default:
+		dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
+			  radeon_encoder->encoder_id);
+		return;
+	}
+
+	if (ASIC_IS_DCE4(rdev)) {
+		/* TODO: other PLLs? */
+		WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+		WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+		WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+		/* Select DTO source */
+		WREG32(0x5ac, radeon_crtc->crtc_id);
+	} else {
+		switch (dig->dig_encoder) {
+		case 0:
+			WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+			WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+			WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+			break;
+
+		case 1:
+			WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+			WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+			WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+			break;
+		default:
+			dev_err(rdev->dev,
+				"Unsupported DIG on encoder 0x%02X\n",
+				radeon_encoder->encoder_id);
+			return;
+		}
+	}
+}
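+
+/*
+ * Sketch of the resulting DTO programming for a hypothetical 148.5 MHz
+ * pixel clock (clock = 148500; DRM mode clocks are expressed in kHz):
+ *
+ *	DCE4:          AUDIO_PLL1_MUL = 48000 * 10   = 480000
+ *	               AUDIO_PLL1_DIV = 148500 * 10  = 1485000
+ *	pre-DCE4 DIG0: AUDIO_PLL1_MUL = 48000 * 50   = 2400000
+ *	               AUDIO_PLL1_DIV = 148500 * 100 = 14850000
+ *
+ * In each case the MUL/DIV quotient encodes the 48 kHz base rate
+ * relative to the pixel clock feeding the encoder.
+ */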
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+	if (!rdev->audio_enabled)
+		return;
+
+	r600_audio_engine_enable(rdev, false);
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_audio.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_blit.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_blit.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_blit.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,877 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_blit.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include "r600_blit_shaders.h"
+
+#define DI_PT_RECTLIST        0x11
+#define DI_INDEX_SIZE_16_BIT  0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8                 0x1
+#define FMT_5_6_5             0x8
+#define FMT_8_8_8_8           0x1a
+#define COLOR_8               0x1
+#define COLOR_5_6_5           0x8
+#define COLOR_8_8_8_8         0x1a
+
+static void
+set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
+{
+	u32 cb_color_info;
+	int pitch, slice;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	h = roundup2(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = ((format << 2) | (1 << 27));
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
+		BEGIN_RING(21 + 2);
+		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+		OUT_RING(gpu_addr >> 8);
+		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
+		OUT_RING(2 << 0);
+	} else {
+		BEGIN_RING(21);
+		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+		OUT_RING(gpu_addr >> 8);
+	}
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((pitch << 0) | (slice << 10));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(cb_color_info);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	ADVANCE_RING();
+}
+
+static void
+cp_set_surface_sync(drm_radeon_private_t *dev_priv,
+		    u32 sync_type, u32 size, u64 mc_addr)
+{
+	u32 cp_coher_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
+	OUT_RING(sync_type);
+	OUT_RING(cp_coher_size);
+	OUT_RING((mc_addr >> 8));
+	OUT_RING(10); /* poll interval */
+	ADVANCE_RING();
+}
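+
+/*
+ * Example of the size conversion above (illustrative values): the
+ * coherence size field counts 256-byte units, rounded up, so a
+ * 512-byte surface yields cp_coher_size = (512 + 255) >> 8 = 2,
+ * while the 0xffffffff sentinel requests a full-range sync.
+ */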
+
+static void
+set_shaders(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u64 gpu_addr;
+	int i;
+	u32 *vs, *ps;
+	uint32_t sq_pgm_resources;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* load shaders */
+	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
+	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+
+	for (i = 0; i < r6xx_vs_size; i++)
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
+
+	dev_priv->blit_vb->used = 512;
+
+	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
+
+	/* setup shader regs */
+	sq_pgm_resources = (1 << 0);
+
+	BEGIN_RING(9 + 12);
+	/* VS */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(gpu_addr >> 8);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(sq_pgm_resources);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	/* PS */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((gpu_addr + 256) >> 8);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(sq_pgm_resources | (1 << 28));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(2);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+	ADVANCE_RING();
+
+	cp_set_surface_sync(dev_priv,
+			    R600_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+static void
+set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
+{
+	uint32_t sq_vtx_constant_word2;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2U << 30);
+#endif
+
+	BEGIN_RING(9);
+	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+	OUT_RING(0x460);
+	OUT_RING(gpu_addr & 0xffffffff);
+	OUT_RING(48 - 1);
+	OUT_RING(sq_vtx_constant_word2);
+	OUT_RING(1 << 0);
+	OUT_RING(0);
+	OUT_RING(0);
+	OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
+	ADVANCE_RING();
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+		cp_set_surface_sync(dev_priv,
+				    R600_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(dev_priv,
+				    R600_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+static void
+set_tex_resource(drm_radeon_private_t *dev_priv,
+		 int format, int w, int h, int pitch, u64 gpu_addr)
+{
+	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
+				  ((w - 1) << 19));
+
+	sq_tex_resource_word1 = (format << 26);
+	sq_tex_resource_word1 |= ((h - 1) << 0);
+
+	sq_tex_resource_word4 = ((1 << 14) |
+				 (0 << 16) |
+				 (1 << 19) |
+				 (2 << 22) |
+				 (3 << 25));
+
+	BEGIN_RING(9);
+	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+	OUT_RING(0);
+	OUT_RING(sq_tex_resource_word0);
+	OUT_RING(sq_tex_resource_word1);
+	OUT_RING(gpu_addr >> 8);
+	OUT_RING(gpu_addr >> 8);
+	OUT_RING(sq_tex_resource_word4);
+	OUT_RING(0);
+	OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
+	ADVANCE_RING();
+
+}
+
+static void
+set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(12);
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16));
+	OUT_RING((x2 << 0) | (y2 << 16));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31));
+	OUT_RING((x2 << 0) | (y2 << 16));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31));
+	OUT_RING((x2 << 0) | (y2 << 16));
+	ADVANCE_RING();
+}
+
+static void
+draw_auto(drm_radeon_private_t *dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(10);
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(DI_PT_RECTLIST);
+
+	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
+	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
+
+	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
+	OUT_RING(1);
+
+	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
+	OUT_RING(3);
+	OUT_RING(DI_SRC_SEL_AUTO_INDEX);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+}
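+
+/*
+ * The auto-indexed RECTLIST draw above consumes the three vertices
+ * staged in the blit vertex buffer (x, y, s, t each, hence the 12
+ * floats written per pass); the hardware infers the fourth corner
+ * of the rectangle itself.
+ */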
+
+static void
+set_default_state(drm_radeon_private_t *dev_priv)
+{
+	int i;
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	RING_LOCALS;
+
+	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
+	case CHIP_R600:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 40;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	default:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV670:
+		num_ps_gprs = 144;
+		num_vs_gprs = 40;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV770:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 256;
+		num_vs_stack_entries = 256;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV710:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 48;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	}
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+		sq_config = 0;
+	else
+		sq_config = R600_VC_ENABLE;
+
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_ALU_INST_PREFER_VECTOR |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+
+	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
+				  R600_NUM_VS_GPRS(num_vs_gprs) |
+				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
+				  R600_NUM_ES_GPRS(num_es_gprs));
+	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
+				   R600_NUM_VS_THREADS(num_vs_threads) |
+				   R600_NUM_GS_THREADS(num_gs_threads) |
+				   R600_NUM_ES_THREADS(num_es_threads));
+	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		BEGIN_RING(r7xx_default_size + 10);
+		for (i = 0; i < r7xx_default_size; i++)
+			OUT_RING(r7xx_default_state[i]);
+	} else {
+		BEGIN_RING(r6xx_default_size + 10);
+		for (i = 0; i < r6xx_default_size; i++)
+			OUT_RING(r6xx_default_state[i]);
+	}
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* SQ config */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
+	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(sq_config);
+	OUT_RING(sq_gpr_resource_mgmt_1);
+	OUT_RING(sq_gpr_resource_mgmt_2);
+	OUT_RING(sq_thread_resource_mgmt);
+	OUT_RING(sq_stack_resource_mgmt_1);
+	OUT_RING(sq_stack_resource_mgmt_2);
+	ADVANCE_RING();
+}
+
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS  23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24.  Above that, we round towards zero
+ * as the fractional bits will not fit in a float.  (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+	uint32_t msb, exponent, fraction;
+
+	/* Zero is special */
+	if (!x) return 0;
+
+	/* Get location of the most significant bit */
+	msb = fls(x);
+
+	/*
+	 * Use a rotate instead of a shift because that works both leftwards
+	 * and rightwards due to the mod(32) behaviour.  This means we don't
+	 * need to check to see if we are above 2^24 or not.
+	 */
+	fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+	exponent = (127 + msb) << I2F_FRAC_BITS;
+
+	return fraction + exponent;
+}
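+
+/*
+ * Worked example, assuming the msb helper is 0-based as in the Linux
+ * __fls() this derives from (a 1-based fls() would need an explicit
+ * "- 1"): for x = 3, msb = 1, giving
+ *
+ *	fraction = ror32(3, (1 - 23) & 0x1f) & I2F_MASK = 0x400000
+ *	exponent = (127 + 1) << 23                      = 0x40000000
+ *	result   = 0x40400000                           = 3.0f
+ */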
+
+static int r600_nomm_get_vb(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	dev_priv->blit_vb = radeon_freelist_get(dev);
+	if (!dev_priv->blit_vb) {
+		DRM_ERROR("Unable to allocate vertex buffer for blit\n");
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static void r600_nomm_put_vb(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->blit_vb->used = 0;
+	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
+}
+
+static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	return (((char *)dev->agp_buffer_map->handle +
+		 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
+}
+
+int
+r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int ret;
+	DRM_DEBUG("\n");
+
+	ret = r600_nomm_get_vb(dev);
+	if (ret)
+		return ret;
+
+	dev_priv->blit_vb->file_priv = file_priv;
+
+	set_default_state(dev_priv);
+	set_shaders(dev);
+
+	return 0;
+}
+
+
+void
+r600_done_blit_copy(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* wait for 3D idle clean */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	r600_nomm_put_vb(dev);
+}
+
+void
+r600_blit_copy(struct drm_device *dev,
+	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+	       int size_bytes)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int max_bytes;
+	u64 vb_addr;
+	u32 *vb;
+
+	vb = r600_nomm_get_vb_ptr(dev);
+
+	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+		max_bytes = 8192;
+
+		while (size_bytes) {
+			int cur_size = size_bytes;
+			int src_x = src_gpu_addr & 255;
+			int dst_x = dst_gpu_addr & 255;
+			int h = 1;
+			src_gpu_addr = src_gpu_addr & ~255;
+			dst_gpu_addr = dst_gpu_addr & ~255;
+
+			if (!src_x && !dst_x) {
+				h = (cur_size / max_bytes);
+				if (h > 8192)
+					h = 8192;
+				if (h == 0)
+					h = 1;
+				else
+					cur_size = max_bytes;
+			} else {
+				if (cur_size > max_bytes)
+					cur_size = max_bytes;
+				if (cur_size > (max_bytes - dst_x))
+					cur_size = (max_bytes - dst_x);
+				if (cur_size > (max_bytes - src_x))
+					cur_size = (max_bytes - src_x);
+			}
+
+			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+
+				r600_nomm_put_vb(dev);
+				r600_nomm_get_vb(dev);
+				if (!dev_priv->blit_vb)
+					return;
+				set_shaders(dev);
+				vb = r600_nomm_get_vb_ptr(dev);
+			}
+
+			vb[0] = int2float(dst_x);
+			vb[1] = 0;
+			vb[2] = int2float(src_x);
+			vb[3] = 0;
+
+			vb[4] = int2float(dst_x);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x);
+			vb[7] = int2float(h);
+
+			vb[8] = int2float(dst_x + cur_size);
+			vb[9] = int2float(h);
+			vb[10] = int2float(src_x + cur_size);
+			vb[11] = int2float(h);
+
+			/* src */
+			set_tex_resource(dev_priv, FMT_8,
+					 src_x + cur_size, h, src_x + cur_size,
+					 src_gpu_addr);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+			/* dst */
+			set_render_target(dev_priv, COLOR_8,
+					  dst_x + cur_size, h,
+					  dst_gpu_addr);
+
+			/* scissors */
+			set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
+
+			/* Vertex buffer setup */
+			vb_addr = dev_priv->gart_buffers_offset +
+				dev_priv->blit_vb->offset +
+				dev_priv->blit_vb->used;
+			set_vtx_resource(dev_priv, vb_addr);
+
+			/* draw */
+			draw_auto(dev_priv);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+					    cur_size * h, dst_gpu_addr);
+
+			vb += 12;
+			dev_priv->blit_vb->used += 12 * 4;
+
+			src_gpu_addr += cur_size * h;
+			dst_gpu_addr += cur_size * h;
+			size_bytes -= cur_size * h;
+		}
+	} else {
+		max_bytes = 8192 * 4;
+
+		while (size_bytes) {
+			int cur_size = size_bytes;
+			int src_x = (src_gpu_addr & 255);
+			int dst_x = (dst_gpu_addr & 255);
+			int h = 1;
+			src_gpu_addr = src_gpu_addr & ~255;
+			dst_gpu_addr = dst_gpu_addr & ~255;
+
+			if (!src_x && !dst_x) {
+				h = (cur_size / max_bytes);
+				if (h > 8192)
+					h = 8192;
+				if (h == 0)
+					h = 1;
+				else
+					cur_size = max_bytes;
+			} else {
+				if (cur_size > max_bytes)
+					cur_size = max_bytes;
+				if (cur_size > (max_bytes - dst_x))
+					cur_size = (max_bytes - dst_x);
+				if (cur_size > (max_bytes - src_x))
+					cur_size = (max_bytes - src_x);
+			}
+
+			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+				r600_nomm_put_vb(dev);
+				r600_nomm_get_vb(dev);
+				if (!dev_priv->blit_vb)
+					return;
+
+				set_shaders(dev);
+				vb = r600_nomm_get_vb_ptr(dev);
+			}
+
+			vb[0] = int2float(dst_x / 4);
+			vb[1] = 0;
+			vb[2] = int2float(src_x / 4);
+			vb[3] = 0;
+
+			vb[4] = int2float(dst_x / 4);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x / 4);
+			vb[7] = int2float(h);
+
+			vb[8] = int2float((dst_x + cur_size) / 4);
+			vb[9] = int2float(h);
+			vb[10] = int2float((src_x + cur_size) / 4);
+			vb[11] = int2float(h);
+
+			/* src */
+			set_tex_resource(dev_priv, FMT_8_8_8_8,
+					 (src_x + cur_size) / 4,
+					 h, (src_x + cur_size) / 4,
+					 src_gpu_addr);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+			/* dst */
+			set_render_target(dev_priv, COLOR_8_8_8_8,
+					  (dst_x + cur_size) / 4, h,
+					  dst_gpu_addr);
+
+			/* scissors */
+			set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
+
+			/* Vertex buffer setup */
+			vb_addr = dev_priv->gart_buffers_offset +
+				dev_priv->blit_vb->offset +
+				dev_priv->blit_vb->used;
+			set_vtx_resource(dev_priv, vb_addr);
+
+			/* draw */
+			draw_auto(dev_priv);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+					    cur_size * h, dst_gpu_addr);
+
+			vb += 12;
+			dev_priv->blit_vb->used += 12 * 4;
+
+			src_gpu_addr += cur_size * h;
+			dst_gpu_addr += cur_size * h;
+			size_bytes -= cur_size * h;
+		}
+	}
+}
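+
+/*
+ * Illustrative trace of the dword-aligned path above: copying 100000
+ * bytes between 256-byte-aligned addresses (src_x = dst_x = 0) uses
+ * max_bytes = 32768 per row, so the first pass draws h = 100000 /
+ * 32768 = 3 rows of 32768 bytes (98304 bytes) and a second pass
+ * moves the remaining 1696 bytes with h = 1.
+ */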
+
+void
+r600_blit_swap(struct drm_device *dev,
+	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+	       int sx, int sy, int dx, int dy,
+	       int w, int h, int src_pitch, int dst_pitch, int cpp)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int cb_format, tex_format;
+	int sx2, sy2, dx2, dy2;
+	u64 vb_addr;
+	u32 *vb;
+
+	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+
+		r600_nomm_put_vb(dev);
+		r600_nomm_get_vb(dev);
+		if (!dev_priv->blit_vb)
+			return;
+
+		set_shaders(dev);
+	}
+	vb = r600_nomm_get_vb_ptr(dev);
+
+	sx2 = sx + w;
+	sy2 = sy + h;
+	dx2 = dx + w;
+	dy2 = dy + h;
+
+	vb[0] = int2float(dx);
+	vb[1] = int2float(dy);
+	vb[2] = int2float(sx);
+	vb[3] = int2float(sy);
+
+	vb[4] = int2float(dx);
+	vb[5] = int2float(dy2);
+	vb[6] = int2float(sx);
+	vb[7] = int2float(sy2);
+
+	vb[8] = int2float(dx2);
+	vb[9] = int2float(dy2);
+	vb[10] = int2float(sx2);
+	vb[11] = int2float(sy2);
+
+	switch(cpp) {
+	case 4:
+		cb_format = COLOR_8_8_8_8;
+		tex_format = FMT_8_8_8_8;
+		break;
+	case 2:
+		cb_format = COLOR_5_6_5;
+		tex_format = FMT_5_6_5;
+		break;
+	default:
+		cb_format = COLOR_8;
+		tex_format = FMT_8;
+		break;
+	}
+
+	/* src */
+	set_tex_resource(dev_priv, tex_format,
+			 src_pitch / cpp,
+			 sy2, src_pitch / cpp,
+			 src_gpu_addr);
+
+	cp_set_surface_sync(dev_priv,
+			    R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
+
+	/* dst */
+	set_render_target(dev_priv, cb_format,
+			  dst_pitch / cpp, dy2,
+			  dst_gpu_addr);
+
+	/* scissors */
+	set_scissors(dev_priv, dx, dy, dx2, dy2);
+
+	/* Vertex buffer setup */
+	vb_addr = dev_priv->gart_buffers_offset +
+		dev_priv->blit_vb->offset +
+		dev_priv->blit_vb->used;
+	set_vtx_resource(dev_priv, vb_addr);
+
+	/* draw */
+	draw_auto(dev_priv);
+
+	cp_set_surface_sync(dev_priv,
+			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+			    dst_pitch * dy2, dst_gpu_addr);
+
+	dev_priv->blit_vb->used += 12 * 4;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_blit.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_blit_kms.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_blit_kms.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_blit_kms.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,759 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_blit_kms.c 261455 2014-02-04 03:36:42Z eadler $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "r600d.h"
+#include "r600_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* emits 21 on rv770+, 23 on r600 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+		  int w, int h, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cb_color_info;
+	int pitch, slice;
+
+	h = roundup2(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = CB_FORMAT(format) |
+		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+		radeon_ring_write(ring, 2 << 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (pitch << 0) | (slice << 10));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, cb_color_info);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+		    u32 sync_type, u32 size,
+		    u64 mc_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cp_coher_size;
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
+
+/* emits 21dw + 1 surface sync = 26dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u64 gpu_addr;
+	u32 sq_pgm_resources;
+
+	/* setup shader regs */
+	sq_pgm_resources = (1 << 0);
+
+	/* VS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	/* PS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 2);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 9 + 1 sync (5) = 14 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_vtx_constant_word2;
+
+	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+		SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |=  SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0x460);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1);
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, 1 << 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+	if ((rdev->family == CHIP_RV610) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RS780) ||
+	    (rdev->family == CHIP_RS880) ||
+	    (rdev->family == CHIP_RV710))
+		cp_set_surface_sync(rdev,
+				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(rdev,
+				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 9 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+		 int format, int w, int h, int pitch,
+		 u64 gpu_addr, u32 size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
+		S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
+		S_038000_TEX_WIDTH(w - 1);
+
+	sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
+	sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
+
+	sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
+		S_038010_DST_SEL_X(SQ_SEL_X) |
+		S_038010_DST_SEL_Y(SQ_SEL_Y) |
+		S_038010_DST_SEL_Z(SQ_SEL_Z) |
+		S_038010_DST_SEL_W(SQ_SEL_W);
+
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+	     int x2, int y2)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+
+}
+
+/* emits 14 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	u64 gpu_addr;
+	int dwords;
+
+	switch (rdev->family) {
+	case CHIP_R600:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 40;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	default:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV670:
+		num_ps_gprs = 144;
+		num_vs_gprs = 40;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV770:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 256;
+		num_vs_stack_entries = 256;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV710:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 48;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	}
+
+	if ((rdev->family == CHIP_RV610) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RS780) ||
+	    (rdev->family == CHIP_RS880) ||
+	    (rdev->family == CHIP_RV710))
+		sq_config = 0;
+	else
+		sq_config = VC_ENABLE;
+
+	sq_config |= (DX9_CONSTS |
+		      ALU_INST_PREFER_VECTOR |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+				  NUM_VS_GPRS(num_vs_gprs) |
+				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+				  NUM_ES_GPRS(num_es_gprs));
+	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+				   NUM_VS_THREADS(num_vs_threads) |
+				   NUM_GS_THREADS(num_gs_threads) |
+				   NUM_ES_THREADS(num_es_threads));
+	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+	/* emit an IB pointing at default state */
+	dwords = roundup2(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
+
+	/* SQ config */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_config);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+	radeon_ring_write(ring, sq_thread_resource_mgmt);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+}
+
+int r600_blit_init(struct radeon_device *rdev)
+{
+	u32 obj_size;
+	int i, r, dwords;
+	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
+
+	rdev->r600_blit.primitives.set_render_target = set_render_target;
+	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+	rdev->r600_blit.primitives.set_shaders = set_shaders;
+	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+	rdev->r600_blit.primitives.set_scissors = set_scissors;
+	rdev->r600_blit.primitives.draw_auto = draw_auto;
+	rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+	rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
+	rdev->r600_blit.ring_size_common += 5; /* done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+	rdev->r600_blit.ring_size_per_loop = 76;
+	/* set_render_target emits 2 extra dwords on rv6xx */
+	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+		rdev->r600_blit.ring_size_per_loop += 2;
+
+	rdev->r600_blit.max_dim = 8192;
+
+	rdev->r600_blit.state_offset = 0;
+
+	if (rdev->family >= CHIP_RV770)
+		rdev->r600_blit.state_len = r7xx_default_size;
+	else
+		rdev->r600_blit.state_len = r6xx_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
+
+	obj_size = dwords * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	rdev->r600_blit.vs_offset = obj_size;
+	obj_size += r6xx_vs_size * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	rdev->r600_blit.ps_offset = obj_size;
+	obj_size += r6xx_ps_size * 4;
+	obj_size = roundup2(obj_size, 256);
+
+	/* pin copy shader into vram if not already initialized */
+	if (rdev->r600_blit.shader_obj == NULL) {
+		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->r600_blit.shader_obj);
+		if (r) {
+			DRM_ERROR("r600 failed to allocate shader\n");
+			return r;
+		}
+
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+			return r;
+		}
+	}
+
+	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
+		  obj_size,
+		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+	if (r) {
+		DRM_ERROR("failed to map blit object %d\n", r);
+		return r;
+	}
+	if (rdev->family >= CHIP_RV770)
+		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
+			    r7xx_default_state, rdev->r600_blit.state_len * 4);
+	else
+		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
+			    r6xx_default_state, rdev->r600_blit.state_len * 4);
+	if (num_packet2s)
+		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+			    packet2s, num_packet2s * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	return 0;
+}
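+
+/*
+ * Resulting layout of the pinned shader BO built above, each section
+ * rounded up to a 256-byte boundary:
+ *
+ *	offset 0 ....... default state (r6xx or r7xx), padded to a
+ *	                 16-dword multiple with PACKET2 no-ops
+ *	vs_offset ...... blit vertex shader (r6xx_vs)
+ *	ps_offset ...... blit pixel shader (r6xx_ps)
+ */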
+
+void r600_blit_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if (rdev->r600_blit.shader_obj == NULL)
+		return;
+	/* If we can't reserve the bo, unref should be enough to destroy
+	 * it when it becomes idle.
+	 */
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (!r) {
+		radeon_bo_unpin(rdev->r600_blit.shader_obj);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	}
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
+				      int *width, int *height, int max_dim)
+{
+	unsigned max_pages;
+	unsigned pages = num_gpu_pages;
+	int w, h;
+
+	if (num_gpu_pages == 0) {
+		/* not supposed to be called with no pages, but just in case */
+		h = 0;
+		w = 0;
+		pages = 0;
+		DRM_ERROR("%s: called with no pages", __func__);
+	} else {
+		int rect_order = 2;
+		h = RECT_UNIT_H;
+		while (num_gpu_pages / rect_order) {
+			h *= 2;
+			rect_order *= 4;
+			if (h >= max_dim) {
+				h = max_dim;
+				break;
+			}
+		}
+		max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
+		if (pages > max_pages)
+			pages = max_pages;
+		w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
+		w = (w / RECT_UNIT_W) * RECT_UNIT_W;
+		pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
+		KASSERT(pages != 0, ("r600_blit_create_rect: pages == 0"));
+	}
+
+	DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
+
+	/* return width and height only if the caller wants it */
+	if (height)
+		*height = h;
+	if (width)
+		*width = w;
+
+	return pages;
+}
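+
+/*
+ * Worked example, assuming the RECT_UNIT_W = RECT_UNIT_H = 32 units
+ * from radeon_blit_common.h (one 4 KiB GPU page per 32x32 unit at 4
+ * bytes per pixel): for num_gpu_pages = 100 the doubling loop settles
+ * on h = 256, w = (100 * 1024) / 256 = 400 rounds down to 384, and
+ * the rectangle covers (384 * 256) / 1024 = 96 pages, leaving 4 pages
+ * for the caller's next iteration.
+ */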
+
+
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+			   struct radeon_fence **fence, struct radeon_sa_bo **vb,
+			   struct radeon_semaphore **sem)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+	int ring_size;
+	int num_loops = 0;
+	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
+
+	/* num loops */
+	while (num_gpu_pages) {
+		num_gpu_pages -=
+			r600_blit_create_rect(num_gpu_pages, NULL, NULL,
+					      rdev->r600_blit.max_dim);
+		num_loops++;
+	}
+
+	/* 48 bytes for vertex per loop */
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+			     (num_loops*48)+256, 256, true);
+	if (r) {
+		return r;
+	}
+
+	r = radeon_semaphore_create(rdev, sem);
+	if (r) {
+		radeon_sa_bo_free(rdev, vb, NULL);
+		return r;
+	}
+
+	/* calculate number of loops correctly */
+	ring_size = num_loops * dwords_per_loop;
+	ring_size += rdev->r600_blit.ring_size_common;
+	r = radeon_ring_lock(rdev, ring, ring_size);
+	if (r) {
+		radeon_sa_bo_free(rdev, vb, NULL);
+		radeon_semaphore_free(rdev, sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+		radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+					    RADEON_RING_TYPE_GFX_INDEX);
+		radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+	} else {
+		radeon_semaphore_free(rdev, sem, NULL);
+	}
+
+	rdev->r600_blit.primitives.set_default_state(rdev);
+	rdev->r600_blit.primitives.set_shaders(rdev);
+	return 0;
+}
+
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+			 struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_sa_bo_free(rdev, &vb, *fence);
+	radeon_semaphore_free(rdev, &sem, *fence);
+}
+
+void r600_kms_blit_copy(struct radeon_device *rdev,
+			u64 src_gpu_addr, u64 dst_gpu_addr,
+			unsigned num_gpu_pages,
+			struct radeon_sa_bo *vb)
+{
+	u64 vb_gpu_addr;
+	u32 *vb_cpu_addr;
+
+	DRM_DEBUG("emitting copy %16jx %16jx %d\n",
+		  (uintmax_t)src_gpu_addr, (uintmax_t)dst_gpu_addr, num_gpu_pages);
+	vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+	vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
+
+	while (num_gpu_pages) {
+		int w, h;
+		unsigned size_in_bytes;
+		unsigned pages_per_loop =
+			r600_blit_create_rect(num_gpu_pages, &w, &h,
+					      rdev->r600_blit.max_dim);
+
+		size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
+		DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
+
+		vb_cpu_addr[0] = 0;
+		vb_cpu_addr[1] = 0;
+		vb_cpu_addr[2] = 0;
+		vb_cpu_addr[3] = 0;
+
+		vb_cpu_addr[4] = 0;
+		vb_cpu_addr[5] = int2float(h);
+		vb_cpu_addr[6] = 0;
+		vb_cpu_addr[7] = int2float(h);
+
+		vb_cpu_addr[8] = int2float(w);
+		vb_cpu_addr[9] = int2float(h);
+		vb_cpu_addr[10] = int2float(w);
+		vb_cpu_addr[11] = int2float(h);
+
+		rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
+							    w, h, w, src_gpu_addr, size_in_bytes);
+		rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
+							     w, h, dst_gpu_addr);
+		rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
+		rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
+		rdev->r600_blit.primitives.draw_auto(rdev);
+		rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
+				    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+				    size_in_bytes, dst_gpu_addr);
+
+		vb_cpu_addr += 12;
+		vb_gpu_addr += 4*12;
+		src_gpu_addr += size_in_bytes;
+		dst_gpu_addr += size_in_bytes;
+		num_gpu_pages -= pages_per_loop;
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_blit_kms.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_blit_shaders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_blit_shaders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_blit_shaders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,721 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_blit_shaders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+
+/*
+ * R6xx+ cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
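+/*
+ * Packet layout note: each group below is a type-3 CP packet.  The header
+ * dword encodes (3 << 30) | ((body_dwords - 1) << 16) | (opcode << 8);
+ * e.g. 0xc0016800 is SET_CONFIG_REG (opcode 0x68) followed by one register
+ * offset dword and one value dword, 0x69 is SET_CONTEXT_REG and 0x6e is
+ * SET_SAMPLER, matching the inline comments.
+ */
+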
+const u32 r6xx_default_state[] =
+{
+	0xc0002400, /* START_3D_CMDBUF */
+	0x00000000,
+
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000003, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x82000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x01020204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000040, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc00f6900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* CB_FOG_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c,
+	0x01000000, /* CB_CLRCMP_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000048,
+	0x3f800000, /* CB_CLEAR_RED */
+	0x00000000,
+	0x3f800000,
+	0x3f800000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00004010, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000000, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+const u32 r7xx_default_state[] =
+{
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000002, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x00000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x00420204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0096900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0036900,
+	0x0000010c,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c, /* CB_CLRCMP_CNTL */
+	0x01000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00514000, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000001, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+/* same for r6xx/r7xx */
+const u32 r6xx_vs[] =
+{
+	0x00000004,
+	0x81000000,
+	0x0000203c,
+	0x94000b08,
+	0x00004000,
+	0x14200b1a,
+	0x00000000,
+	0x00000000,
+	0x3c000000,
+	0x68cd1000,
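+	/* the big-endian variant of the next dword presumably flips the
+	 * vertex-fetch endian-swap field */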
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
+	0x00080000,
+#endif
+	0x00000000,
+};
+
+const u32 r6xx_ps[] =
+{
+	0x00000002,
+	0x80800000,
+	0x00000000,
+	0x94200688,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
+const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
+const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
+const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);


Property changes on: trunk/sys/dev/drm2/radeon/r600_blit_shaders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_blit_shaders.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_blit_shaders.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_blit_shaders.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,43 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef R600_BLIT_SHADERS_H
+#define R600_BLIT_SHADERS_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_blit_shaders.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+extern const u32 r6xx_ps[];
+extern const u32 r6xx_vs[];
+extern const u32 r7xx_default_state[];
+extern const u32 r6xx_default_state[];
+
+
+extern const u32 r6xx_ps_size, r6xx_vs_size;
+extern const u32 r6xx_default_size, r7xx_default_size;
+
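+/* Converts an unsigned integer to its IEEE-754 single-precision bit
+ * pattern, used when filling the blit vertex buffers. */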
+__pure uint32_t int2float(uint32_t x);
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r600_blit_shaders.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_cp.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_cp.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_cp.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2677 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008-2009 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Dave Airlie <airlied at redhat.com>
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_cp.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r600_cp.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+#ifdef __linux__
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+#endif
+
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+			unsigned family, u32 *ib, int *l);
+void r600_cs_legacy_init(void);
+#endif
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+# define ATI_PCIGART_PAGE_MASK		(~(ATI_PCIGART_PAGE_SIZE-1))
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+/* MAX values used for gfx init */
+#define R6XX_MAX_SH_GPRS           256
+#define R6XX_MAX_TEMP_GPRS         16
+#define R6XX_MAX_SH_THREADS        256
+#define R6XX_MAX_SH_STACK_ENTRIES  4096
+#define R6XX_MAX_BACKENDS          8
+#define R6XX_MAX_BACKENDS_MASK     0xff
+#define R6XX_MAX_SIMDS             8
+#define R6XX_MAX_SIMDS_MASK        0xff
+#define R6XX_MAX_PIPES             8
+#define R6XX_MAX_PIPES_MASK        0xff
+
+#define R7XX_MAX_SH_GPRS           256
+#define R7XX_MAX_TEMP_GPRS         16
+#define R7XX_MAX_SH_THREADS        256
+#define R7XX_MAX_SH_STACK_ENTRIES  4096
+#define R7XX_MAX_BACKENDS          8
+#define R7XX_MAX_BACKENDS_MASK     0xff
+#define R7XX_MAX_SIMDS             16
+#define R7XX_MAX_SIMDS_MASK        0xffff
+#define R7XX_MAX_PIPES             8
+#define R7XX_MAX_PIPES_MASK        0xff
+
+static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
+{
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots;
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+			slots = (RADEON_READ(R600_GRBM_STATUS)
+				 & R700_CMDFIFO_AVAIL_MASK);
+		else
+			slots = (RADEON_READ(R600_GRBM_STATUS)
+				 & R600_CMDFIFO_AVAIL_MASK);
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(R600_GRBM_STATUS),
+		 RADEON_READ(R600_GRBM_STATUS2));
+
+	return -EBUSY;
+}
+
+static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv)
+{
+	int i, ret;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		ret = r600_do_wait_for_fifo(dev_priv, 8);
+	else
+		ret = r600_do_wait_for_fifo(dev_priv, 16);
+	if (ret)
+		return ret;
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE))
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(R600_GRBM_STATUS),
+		 RADEON_READ(R600_GRBM_STATUS2));
+
+	return -EBUSY;
+}
+
+void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+#ifdef __linux__
+	int max_pages;
+	int pages;
+	int i;
+#endif
+
+	if (!entry)
+		return;
+
+	if (gart_info->bus_addr) {
+#ifdef __linux__
+		max_pages = (gart_info->table_size / sizeof(u64));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+#endif
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+}
+
+/* R600 PCIGART page table setup */
+int r600_page_table_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+	struct drm_local_map *map = &gart_info->mapping;
+	struct drm_sg_mem *entry = dev->sg;
+	int ret = 0;
+	int i, j;
+	int pages;
+	u64 page_base;
+	dma_addr_t entry_addr;
+	int max_ati_pages, max_real_pages, gart_idx;
+
+	/* the page table is available - populate it */
+	max_ati_pages = (gart_info->table_size / sizeof(u64));
+	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+
+	pages = (entry->pages <= max_real_pages) ?
+		entry->pages : max_real_pages;
+
+	memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));
+
+	gart_idx = 0;
+	for (i = 0; i < pages; i++) {
+#ifdef __linux__
+		entry->busaddr[i] = pci_map_page(dev->pdev,
+						 entry->pagelist[i], 0,
+						 PAGE_SIZE,
+						 PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			r600_page_table_cleanup(dev, gart_info);
+			goto done;
+		}
+#endif
+		entry_addr = entry->busaddr[i];
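+		/* each system page spans PAGE_SIZE / ATI_PCIGART_PAGE_SIZE GART entries */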
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
+			page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+			page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+
+			DRM_WRITE64(map, gart_idx * sizeof(u64), page_base);
+
+			gart_idx++;
+
+			if ((i % 128) == 0)
+				DRM_DEBUG("page entry %d: 0x%016llx\n",
+				    i, (unsigned long long)page_base);
+			entry_addr += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 1;
+#ifdef __linux__
+done:
+#endif
+	return ret;
+}
+
+static void r600_vm_flush_gart_range(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 resp, countdown = 1000;
+	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2);
+
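+	/* poll the invalidation response; give up after ~1000 us */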
+	do {
+		resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE);
+		countdown--;
+		DRM_UDELAY(1);
+	} while (((resp & 0xf0) == 0) && countdown);
+}
+
+static void r600_vm_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table constructed above */
+	u32 vm_c0, i;
+	u32 mc_rd_a;
+	u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIE system aperture */
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+	/* setup MC RD a */
+	mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS |
+		R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) |
+		R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY;
+
+	RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING);
+	RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/);
+
+	RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE);
+	RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a);
+
+	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+	vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7);
+	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+	vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) |
+		       R600_VM_L2_CNTL3_BANK_SELECT_1(1) |
+		       R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2));
+	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+	r600_vm_flush_gart_range(dev);
+}
+
+static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+	const char *chip_name;
+	size_t pfp_req_size, me_req_size;
+	char fw_name[30];
+	int err;
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:  chip_name = "R600";  break;
+	case CHIP_RV610: chip_name = "RV610"; break;
+	case CHIP_RV630: chip_name = "RV630"; break;
+	case CHIP_RV620: chip_name = "RV620"; break;
+	case CHIP_RV635: chip_name = "RV635"; break;
+	case CHIP_RV670: chip_name = "RV670"; break;
+	case CHIP_RS780:
+	case CHIP_RS880: chip_name = "RS780"; break;
+	case CHIP_RV770: chip_name = "RV770"; break;
+	case CHIP_RV730:
+	case CHIP_RV740: chip_name = "RV730"; break;
+	case CHIP_RV710: chip_name = "RV710"; break;
+	default:         panic("%s: Unsupported family %d", __func__, dev_priv->flags & RADEON_FAMILY_MASK);
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+		me_req_size = R700_PM4_UCODE_SIZE * 4;
+	} else {
+		pfp_req_size = PFP_UCODE_SIZE * 4;
+		me_req_size = PM4_UCODE_SIZE * 12;
+	}
+
+	DRM_INFO("Loading %s CP Microcode\n", chip_name);
+	err = 0;
+
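+	/* firmware_get(9) resolves images provided by the radeonkmsfw_* modules */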
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
+	dev_priv->pfp_fw = firmware_get(fw_name);
+	if (dev_priv->pfp_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (dev_priv->pfp_fw->datasize != pfp_req_size) {
+		DRM_ERROR(
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->pfp_fw->datasize, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
+	dev_priv->me_fw = firmware_get(fw_name);
+	if (dev_priv->me_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (dev_priv->me_fw->datasize != me_req_size) {
+		DRM_ERROR(
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->me_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+out:
+	if (err) {
+		if (err != -EINVAL)
+			DRM_ERROR(
+			       "r600_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		if (dev_priv->pfp_fw != NULL) {
+			firmware_put(dev_priv->pfp_fw, FIRMWARE_UNLOAD);
+			dev_priv->pfp_fw = NULL;
+		}
+		if (dev_priv->me_fw != NULL) {
+			firmware_put(dev_priv->me_fw, FIRMWARE_UNLOAD);
+			dev_priv->me_fw = NULL;
+		}
+	}
+	return err;
+}
+
+static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+		return;
+
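+	/* halt the CP and soft-reset it before streaming in the big-endian ucode */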
+	r600_do_cp_stop(dev_priv);
+
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_NO_UPDATE |
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)dev_priv->me_fw->data;
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+		RADEON_WRITE(R600_CP_ME_RAM_DATA,
+			     be32_to_cpup(fw_data++));
+
+	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < PFP_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
+			     be32_to_cpup(fw_data++));
+
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+
+}
+
+static void r700_vm_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table constructed above */
+	u32 vm_c0, i;
+	u32 mc_vm_md_l1;
+	u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIE system aperture */
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+	mc_vm_md_l1 = R700_ENABLE_L1_TLB |
+	    R700_ENABLE_L1_FRAGMENT_PROCESSING |
+	    R700_SYSTEM_ACCESS_MODE_IN_SYS |
+	    R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+	    R700_EFFECTIVE_L1_TLB_SIZE(5) |
+	    R700_EFFECTIVE_L1_QUEUE_SIZE(5);
+
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1);
+
+	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+	vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7);
+	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+	vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2);
+	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+	r600_vm_flush_gart_range(dev);
+}
+
+static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+		return;
+
+	r600_do_cp_stop(dev_priv);
+
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_NO_UPDATE |
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)dev_priv->me_fw->data;
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+
+}
+
+static void r600_test_writeback(drm_radeon_private_t *dev_priv)
+{
+	u32 tmp;
+
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere, test it here and possibly
+	 * enable it if it appears to work
+	 */
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+
+	RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef);
+
+	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+		u32 val;
+
+		val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1));
+		if (val == 0xdeadbeef)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (tmp < dev_priv->usec_timeout) {
+		dev_priv->writeback_works = 1;
+		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+	} else {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback test failed\n");
+	}
+	if (radeon_no_wb == 1) {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback forced off\n");
+	}
+
+	if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfer */
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
+		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
+	}
+}
+
+int r600_do_engine_reset(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 cp_ptr, cp_me_cntl, cp_rb_cntl;
+
+	DRM_INFO("Resetting GPU\n");
+
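+	/* save the ring write pointer and ME control so they can be restored after the reset */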
+	cp_ptr = RADEON_READ(R600_CP_RB_WPTR);
+	cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL);
+	RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT);
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	DRM_UDELAY(50);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+
+	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
+
+	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
+	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
+	RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl);
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl);
+
+	/* Reset the CP ring */
+	r600_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	radeon_freelist_reset(dev);
+
+	return 0;
+
+}
+
+static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+					     u32 num_backends,
+					     u32 backend_disable_mask)
+{
+	u32 backend_map = 0;
+	u32 enabled_backends_mask;
+	u32 enabled_backends_count;
+	u32 cur_pipe;
+	u32 swizzle_pipe[R6XX_MAX_PIPES];
+	u32 cur_backend;
+	u32 i;
+
+	if (num_tile_pipes > R6XX_MAX_PIPES)
+		num_tile_pipes = R6XX_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > R6XX_MAX_BACKENDS)
+		num_backends = R6XX_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	enabled_backends_mask = 0;
+	enabled_backends_count = 0;
+	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
+	switch (num_tile_pipes) {
+	case 1:
+		swizzle_pipe[0] = 0;
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 3:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		break;
+	case 4:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		swizzle_pipe[3] = 3;
+		break;
+	case 5:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		swizzle_pipe[3] = 3;
+		swizzle_pipe[4] = 4;
+		break;
+	case 6:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 5;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		break;
+	case 7:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 6;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		swizzle_pipe[6] = 5;
+		break;
+	case 8:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 6;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		swizzle_pipe[6] = 5;
+		swizzle_pipe[7] = 7;
+		break;
+	}
+
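+	/* walk the pipes in swizzled order, assigning each to the next enabled backend (two bits per pipe) */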
+	cur_backend = 0;
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+
+		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+	}
+
+	return backend_map;
+}
+
+static int r600_count_pipe_bits(uint32_t val)
+{
+	return hweight32(val);
+}
+
+static void r600_gfx_init(struct drm_device *dev,
+			  drm_radeon_private_t *dev_priv)
+{
+	int i, j, num_qd_pipes;
+	u32 sx_debug_1;
+	u32 tc_cntl;
+	u32 arb_pop;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_gpr_resource_mgmt_1 = 0;
+	u32 sq_gpr_resource_mgmt_2 = 0;
+	u32 sq_thread_resource_mgmt = 0;
+	u32 sq_stack_resource_mgmt_1 = 0;
+	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 hdp_host_path_cntl;
+	u32 backend_map;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 ramcfg;
+
+	/* setup chip specs */
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 8;
+		dev_priv->r600_max_simds = 4;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 2;
+		dev_priv->r600_max_simds = 3;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 128;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 4;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		dev_priv->r600_max_pipes = 1;
+		dev_priv->r600_max_tile_pipes = 1;
+		dev_priv->r600_max_simds = 2;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 128;
+		dev_priv->r600_max_hw_contexts = 4;
+		dev_priv->r600_max_gs_threads = 4;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 1;
+		break;
+	case CHIP_RV670:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 4;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 192;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		RADEON_WRITE((0x2c14 + j), 0x00000000);
+		RADEON_WRITE((0x2c18 + j), 0x00000000);
+		RADEON_WRITE((0x2c1c + j), 0x00000000);
+		RADEON_WRITE((0x2c20 + j), 0x00000000);
+		RADEON_WRITE((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	ramcfg = RADEON_READ(R600_RAMCFG);
+
+	switch (dev_priv->r600_max_tile_pipes) {
+	case 1:
+		gb_tiling_config |= R600_PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config |= R600_PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config |= R600_PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config |= R600_PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+
+	gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK);
+
+	gb_tiling_config |= R600_GROUP_SIZE(0);
+
+	if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) {
+		gb_tiling_config |= R600_ROW_TILING(3);
+		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+		gb_tiling_config |=
+			R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+	}
+
+	gb_tiling_config |= R600_BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	cc_rb_backend_disable |=
+		R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
+
+	backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+							(R6XX_MAX_BACKENDS -
+							 r600_count_pipe_bits((cc_rb_backend_disable &
+									       R6XX_MAX_BACKENDS_MASK) >> 16)),
+							(cc_rb_backend_disable >> 16));
+	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+	RADEON_WRITE(R600_GB_TILING_CONFIG,      gb_tiling_config);
+	RADEON_WRITE(R600_DCP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
+	RADEON_WRITE(R600_HDP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
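+	/* decode group size, pipe count and bank count back out of the tiling config */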
+	if (gb_tiling_config & 0xc0) {
+		dev_priv->r600_group_size = 512;
+	} else {
+		dev_priv->r600_group_size = 256;
+	}
+	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+	if (gb_tiling_config & 0x30) {
+		dev_priv->r600_nbanks = 8;
+	} else {
+		dev_priv->r600_nbanks = 4;
+	}
+
+	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
+	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+	num_qd_pipes =
+		R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+						R600_ROQ_IB2_START(0x2b)));
+
+	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) |
+					      R600_ROQ_END(0x40)));
+
+	RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
+					R600_SYNC_GRADIENT |
+					R600_SYNC_WALKER |
+					R600_SYNC_ALIGNER));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670)
+		RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021);
+
+	sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1);
+	sx_debug_1 |= R600_SMX_EVENT_RELEASE;
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600))
+		sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS;
+	RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1);
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+		RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+	else
+		RADEON_WRITE(R600_DB_DEBUG, 0);
+
+	RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) |
+					  R600_DEPTH_FLUSH(16) |
+					  R600_DEPTH_PENDING_FREE(4) |
+					  R600_DEPTH_CACHELINE_FREE(16)));
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0);
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0));
+
+	sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+		sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
+				    R600_FETCH_FIFO_HIWATER(0xa) |
+				    R600_DONE_FIFO_HIWATER(0xe0) |
+				    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) {
+		sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff);
+		sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4);
+	}
+	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RADEON_READ(R600_SQ_CONFIG);
+	sq_config &= ~(R600_PS_PRIO(3) |
+		       R600_VS_PRIO(3) |
+		       R600_GS_PRIO(3) |
+		       R600_ES_PRIO(3));
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_VC_ENABLE |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) |
+					  R600_NUM_VS_GPRS(124) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(4));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) |
+					  R600_NUM_ES_GPRS(0));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) |
+					   R600_NUM_VS_THREADS(48) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(4));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) |
+					    R600_NUM_VS_STACK_ENTRIES(128));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) |
+					    R600_NUM_ES_STACK_ENTRIES(0));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+		/* no vertex cache */
+		sq_config &= ~R600_VC_ENABLE;
+
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+					  R600_NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+					    R600_NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+					    R600_NUM_ES_STACK_ENTRIES(16));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) |
+					  R600_NUM_ES_GPRS(18));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+					    R600_NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+					    R600_NUM_ES_STACK_ENTRIES(16));
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+					  R600_NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) |
+					    R600_NUM_VS_STACK_ENTRIES(64));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) |
+					    R600_NUM_ES_STACK_ENTRIES(64));
+	}
+
+	RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
+	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
+	else
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));
+
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) |
+						    R600_S0_Y(0x4) |
+						    R600_S1_X(0x4) |
+						    R600_S1_Y(0xc)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) |
+						    R600_S0_Y(0xe) |
+						    R600_S1_X(0x2) |
+						    R600_S1_Y(0x2) |
+						    R600_S2_X(0xa) |
+						    R600_S2_Y(0x6) |
+						    R600_S3_X(0x6) |
+						    R600_S3_Y(0xa)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) |
+							R600_S0_Y(0xb) |
+							R600_S1_X(0x4) |
+							R600_S1_Y(0xc) |
+							R600_S2_X(0x1) |
+							R600_S2_Y(0x6) |
+							R600_S3_X(0xa) |
+							R600_S3_Y(0xe)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) |
+							R600_S4_Y(0x1) |
+							R600_S5_X(0x0) |
+							R600_S5_Y(0x0) |
+							R600_S6_X(0xb) |
+							R600_S6_Y(0x4) |
+							R600_S7_X(0x7) |
+							R600_S7_Y(0x8)));
+
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:
+	case CHIP_RV630:
+	case CHIP_RV635:
+		gs_prim_buffer_depth = 0;
+		break;
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		gs_prim_buffer_depth = 32;
+		break;
+	case CHIP_RV670:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+	RADEON_WRITE(R600_SX_MISC, 0);
+	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		tc_cntl = R600_TC_L2_SIZE(8);
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		tc_cntl = R600_TC_L2_SIZE(4);
+		break;
+	case CHIP_R600:
+		tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT;
+		break;
+	default:
+		tc_cntl = R600_TC_L2_SIZE(0);
+		break;
+	}
+
+	RADEON_WRITE(R600_TC_CNTL, tc_cntl);
+
+	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	arb_pop = RADEON_READ(R600_ARB_POP);
+	arb_pop |= R600_ENABLE_TC128;
+	RADEON_WRITE(R600_ARB_POP, arb_pop);
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+					  R600_NUM_CLIP_SEQ(3)));
+	RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095));
+
+}
+
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+					     u32 num_tile_pipes,
+					     u32 num_backends,
+					     u32 backend_disable_mask)
+{
+	u32 backend_map = 0;
+	u32 enabled_backends_mask;
+	u32 enabled_backends_count;
+	u32 cur_pipe;
+	u32 swizzle_pipe[R7XX_MAX_PIPES];
+	u32 cur_backend;
+	u32 i;
+	bool force_no_swizzle;
+
+	if (num_tile_pipes > R7XX_MAX_PIPES)
+		num_tile_pipes = R7XX_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > R7XX_MAX_BACKENDS)
+		num_backends = R7XX_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	enabled_backends_mask = 0;
+	enabled_backends_count = 0;
+	for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
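+	/* Pipe swizzle table: identity order when force_no_swizzle is set,
+	 * otherwise an interleaved order that spreads consecutive tile
+	 * pipes apart before backends are assigned below.
+	 */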
+	switch (num_tile_pipes) {
+	case 1:
+		swizzle_pipe[0] = 0;
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 3:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 1;
+		}
+		break;
+	case 4:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 3;
+			swizzle_pipe[3] = 1;
+		}
+		break;
+	case 5:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 1;
+			swizzle_pipe[4] = 3;
+		}
+		break;
+	case 6:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 5;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+		}
+		break;
+	case 7:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+			swizzle_pipe[6] = 5;
+		}
+		break;
+	case 8:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+			swizzle_pipe[7] = 7;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+			swizzle_pipe[6] = 7;
+			swizzle_pipe[7] = 5;
+		}
+		break;
+	}
+
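+	/* Walk the (possibly swizzled) tile pipes and assign enabled
+	 * backends round-robin; backend_map packs a 2-bit backend index
+	 * per tile pipe.
+	 */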
+	cur_backend = 0;
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+
+		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+	}
+
+	return backend_map;
+}
+
+static void r700_gfx_init(struct drm_device *dev,
+			  drm_radeon_private_t *dev_priv)
+{
+	int i, j, num_qd_pipes;
+	u32 ta_aux_cntl;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 db_debug3;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_thread_resource_mgmt;
+	u32 hdp_host_path_cntl;
+	u32 sq_dyn_gpr_size_simd_ab_0;
+	u32 backend_map;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 mc_arb_ramcfg;
+	u32 db_debug4;
+
+	/* setup chip specs */
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 8;
+		dev_priv->r600_max_simds = 10;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 512;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 112;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0xF9;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV730:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 8;
+		dev_priv->r600_max_backends = 2;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 256;
+		dev_priv->r600_sx_max_export_pos_size = 32;
+		dev_priv->r600_sx_max_export_smx_size = 224;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0xf9;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		if (dev_priv->r600_sx_max_export_pos_size > 16) {
+			dev_priv->r600_sx_max_export_pos_size -= 16;
+			dev_priv->r600_sx_max_export_smx_size += 16;
+		}
+		break;
+	case CHIP_RV710:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 2;
+		dev_priv->r600_max_simds = 2;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 4;
+		dev_priv->r600_max_gs_threads = 8 * 2;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 112;
+		dev_priv->r600_sq_num_cf_insts = 1;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0x40;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV740:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 8;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 512;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 256;
+		dev_priv->r600_sx_max_export_pos_size = 32;
+		dev_priv->r600_sx_max_export_smx_size = 224;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0x100;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+
+		if (dev_priv->r600_sx_max_export_pos_size > 16) {
+			dev_priv->r600_sx_max_export_pos_size -= 16;
+			dev_priv->r600_sx_max_export_smx_size += 16;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		RADEON_WRITE((0x2c14 + j), 0x00000000);
+		RADEON_WRITE((0x2c18 + j), 0x00000000);
+		RADEON_WRITE((0x2c1c + j), 0x00000000);
+		RADEON_WRITE((0x2c20 + j), 0x00000000);
+		RADEON_WRITE((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG);
+
+	switch (dev_priv->r600_max_tile_pipes) {
+	case 1:
+		gb_tiling_config |= R600_PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config |= R600_PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config |= R600_PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config |= R600_PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
+		gb_tiling_config |= R600_BANK_TILING(1);
+	else
+		gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK);
+
+	gb_tiling_config |= R600_GROUP_SIZE(0);
+
+	if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) {
+		gb_tiling_config |= R600_ROW_TILING(3);
+		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+		gb_tiling_config |=
+			R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+	}
+
+	gb_tiling_config |= R600_BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	cc_rb_backend_disable |=
+		R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+
+	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+								dev_priv->r600_max_tile_pipes,
+								(R7XX_MAX_BACKENDS -
+								 r600_count_pipe_bits((cc_rb_backend_disable &
+										       R7XX_MAX_BACKENDS_MASK) >> 16)),
+								(cc_rb_backend_disable >> 16));
+	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+	RADEON_WRITE(R600_GB_TILING_CONFIG,      gb_tiling_config);
+	RADEON_WRITE(R600_DCP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
+	RADEON_WRITE(R600_HDP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
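+	/* Keep software copies of the tiling parameters, decoded from the
+	 * gb_tiling_config value just written: group size from bits 7:6,
+	 * pipe count from bits 3:1, bank count from bits 5:4.
+	 */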
+	if (gb_tiling_config & 0xc0) {
+		dev_priv->r600_group_size = 512;
+	} else {
+		dev_priv->r600_group_size = 256;
+	}
+	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+	if (gb_tiling_config & 0x30) {
+		dev_priv->r600_nbanks = 8;
+	} else {
+		dev_priv->r600_nbanks = 4;
+	}
+
+	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
+	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+	RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+	RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
+
+	num_qd_pipes =
+		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+						R600_ROQ_IB2_START(0x2b)));
+
+	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
+
+	ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+	RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
+
+	sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
+	sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
+	RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff);
+	smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
+	RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+		RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+						  R700_GS_FLUSH_CTL(4) |
+						  R700_ACK_FLUSH_CTL(3) |
+						  R700_SYNC_FLUSH_CTL));
+
+	db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+	db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV740:
+		db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+		break;
+	case CHIP_RV710:
+	case CHIP_RV730:
+	default:
+		db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+		break;
+	}
+	RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
+		db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
+		db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
+		RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
+	}
+
+	RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) |
+						   R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) |
+						   R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1)));
+
+	RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) |
+						 R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) |
+						 R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize)));
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1);
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4));
+
+	RADEON_WRITE(R600_CP_PERFMON_CNTL, 0);
+
+	sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) |
+			    R600_DONE_FIFO_HIWATER(0xe0) |
+			    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+		break;
+	case CHIP_RV740:
+	default:
+		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
+		break;
+	}
+	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RADEON_READ(R600_SQ_CONFIG);
+	sq_config &= ~(R600_PS_PRIO(3) |
+		       R600_VS_PRIO(3) |
+		       R600_GS_PRIO(3) |
+		       R600_ES_PRIO(3));
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_VC_ENABLE |
+		      R600_EXPORT_SRC_C |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+		/* no vertex cache */
+		sq_config &= ~R600_VC_ENABLE;
+
+	RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1,  (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+						    R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+						    R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2)));
+
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2,  (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) |
+						    R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64)));
+
+	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) |
+				   R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) |
+				   R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8));
+	if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads)
+		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads);
+	else
+		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8);
+	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+						     R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+						     R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+	sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64));
+
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+	RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) |
+						     R700_FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) |
+							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+	else
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) |
+							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		gs_prim_buffer_depth = 384;
+		break;
+	case CHIP_RV710:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+	RADEON_WRITE(R600_SX_MISC, 0);
+	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+	RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa);
+	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+	RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+	RADEON_WRITE(R700_TCP_CNTL, 0);
+
+	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+					  R600_NUM_CLIP_SEQ(3)));
+
+}
+
+static void r600_cp_init_ring_buffer(struct drm_device *dev,
+				       drm_radeon_private_t *dev_priv,
+				       struct drm_file *file_priv)
+{
+	struct drm_radeon_master_private *master_priv;
+	u32 ring_start;
+	u64 rptr_addr;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+		r700_gfx_init(dev, dev_priv);
+	else
+		r600_gfx_init(dev, dev_priv);
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+
+	/* Set ring buffer size */
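+	/* size_l2qw and rptr_update_l2qw are log2 values in quad-word
+	 * (8-byte) units; see their setup in r600_do_init_cp() below.
+	 */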
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     RADEON_RB_NO_UPDATE |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
+
+	/* Set the write pointer delay */
+	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	/* Initialize the ring buffer's read and write pointers */
+	RADEON_WRITE(R600_CP_RB_RPTR_WR, 0);
+	RADEON_WRITE(R600_CP_RB_WPTR, 0);
+	SET_RING_HEAD(dev_priv, 0);
+	dev_priv->ring.tail = 0;
+
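+	/* Convert the rptr map offset into a GART-relative bus address:
+	 * relative to the AGP base in AGP mode, or to the scatter/gather
+	 * buffer's virtual address otherwise.
+	 */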
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		rptr_addr = dev_priv->ring_rptr->offset
+			- dev->agp->base +
+			dev_priv->gart_vm_start;
+	} else
+#endif
+	{
+		rptr_addr = dev_priv->ring_rptr->offset
+			- ((unsigned long) dev->sg->vaddr)
+			+ dev_priv->gart_vm_start;
+	}
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
+
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     RADEON_BUF_SWAP_32BIT |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* XXX */
+		radeon_write_agp_base(dev_priv, dev->agp->base);
+
+		/* XXX */
+		radeon_write_agp_location(dev_priv,
+			     (((dev_priv->gart_vm_start - 1 +
+				dev_priv->gart_size) & 0xffff0000) |
+			      (dev_priv->gart_vm_start >> 16)));
+
+		ring_start = (dev_priv->cp_ring->offset
+			      - dev->agp->base
+			      + dev_priv->gart_vm_start);
+	} else
+#endif
+		ring_start = (dev_priv->cp_ring->offset
+			      - (unsigned long)dev->sg->vaddr
+			      + dev_priv->gart_vm_start);
+
+	RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);
+
+	RADEON_WRITE(R600_CP_ME_CNTL, 0xff);
+
+	RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28));
+
+	/* Initialize the scratch register pointer.  This will cause
+	 * the scratch register values to be written out to memory
+	 * whenever they are updated.
+	 *
+	 * We simply put this behind the ring read pointer; this works
+	 * with PCI GART as well as (whatever kind of) AGP GART
+	 */
+	{
+		u64 scratch_addr;
+
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
+		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
+		scratch_addr += R600_SCRATCH_REG_OFFSET;
+		scratch_addr >>= 8;
+		scratch_addr &= 0xffffffff;
+
+		RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr);
+	}
+
+	RADEON_WRITE(R600_SCRATCH_UMSK, 0x7);
+
+	/* Turn on bus mastering */
+	radeon_enable_bm(dev_priv);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
+	RADEON_WRITE(R600_LAST_FRAME_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+	RADEON_WRITE(R600_LAST_DISPATCH_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
+	RADEON_WRITE(R600_LAST_CLEAR_REG, 0);
+
+	/* reset sarea copies of these */
+	master_priv = file_priv->master->driver_priv;
+	if (master_priv->sarea_priv) {
+		master_priv->sarea_priv->last_frame = 0;
+		master_priv->sarea_priv->last_dispatch = 0;
+		master_priv->sarea_priv->last_clear = 0;
+	}
+
+	r600_do_wait_for_idle(dev_priv);
+
+}
+
+int r600_do_cleanup_cp(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		if (dev_priv->cp_ring != NULL) {
+			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			dev_priv->cp_ring = NULL;
+		}
+		if (dev_priv->ring_rptr != NULL) {
+			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			dev_priv->ring_rptr = NULL;
+		}
+		if (dev->agp_buffer_map != NULL) {
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			dev->agp_buffer_map = NULL;
+		}
+	} else
+#endif
+	{
+
+		if (dev_priv->gart_info.bus_addr)
+			r600_page_table_cleanup(dev, &dev_priv->gart_info);
+
+		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
+			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr = NULL;
+		}
+	}
+	/* only clear to the start of flags */
+	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+	return 0;
+}
+
+int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+		    struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+	DRM_DEBUG("\n");
+
+	sx_init(&dev_priv->cs_mutex, "drm__radeon_private__cs_mutex");
+	r600_cs_legacy_init();
+	/* fail if we require the new memory map but don't have it */
+	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+		DRM_DEBUG("Forcing AGP card to PCI mode\n");
+		dev_priv->flags &= ~RADEON_IS_AGP;
+		/* The writeback test succeeds, but when writeback is enabled,
+		 * the ring buffer read ptr update fails after first 128 bytes.
+		 */
+		radeon_no_wb = 1;
+	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+		 && !init->is_pci) {
+		DRM_DEBUG("Restoring AGP flag\n");
+		dev_priv->flags |= RADEON_IS_AGP;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+	dev_priv->do_boxes = 0;
+	dev_priv->cp_mode = init->cp_mode;
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+	master_priv->sarea = drm_getsarea(dev);
+	if (!master_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cp_ring) {
+		DRM_ERROR("could not find cp ring region!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->gart_textures_offset) {
+		dev_priv->gart_textures =
+		    drm_core_findmap(dev, init->gart_textures_offset);
+		if (!dev_priv->gart_textures) {
+			DRM_ERROR("could not find GART texture region!\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	}
+
+#if __OS_HAS_AGP
+	/* XXX */
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		if (!dev_priv->cp_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("could not find ioremap agp regions!\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	} else
+#endif
+	{
+		dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
+		dev_priv->ring_rptr->handle =
+			(void *)(unsigned long)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+			(void *)(unsigned long)dev->agp_buffer_map->offset;
+
+		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+			  dev_priv->cp_ring->handle);
+		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+			  dev_priv->ring_rptr->handle);
+		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+			  dev->agp_buffer_map->handle);
+	}
+
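+	/* The fb location register packs the base in its low 16 bits and
+	 * the top in its high 16 bits, in 16MB units; the extra 0x1000000
+	 * accounts for the top field being inclusive.
+	 */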
+	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
+	dev_priv->fb_size =
+		(((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000)
+		- dev_priv->fb_location;
+
+	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+					((dev_priv->front_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+				       ((dev_priv->back_offset
+					 + dev_priv->fb_location) >> 10));
+
+	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+					((dev_priv->depth_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+	if (dev_priv->new_memmap) {
+		u32 base = 0;
+
+		DRM_INFO("Setting GART location based on new memory map\n");
+
+		/* If using AGP, try to locate the AGP aperture at the same
+		 * location in the card and on the bus, though we have to
+		 * align it down.
+		 */
+#if __OS_HAS_AGP
+		/* XXX */
+		if (dev_priv->flags & RADEON_IS_AGP) {
+			base = dev->agp->base;
+			/* Check if valid */
+			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+					 dev->agp->base);
+				base = 0;
+			}
+		}
+#endif
+		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+		if (base == 0) {
+			base = dev_priv->fb_location + dev_priv->fb_size;
+			if (base < dev_priv->fb_location ||
+			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+				base = dev_priv->fb_location
+					- dev_priv->gart_size;
+		}
+		dev_priv->gart_vm_start = base & 0xffc00000u;
+		if (dev_priv->gart_vm_start != base)
+			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+				 base, dev_priv->gart_vm_start);
+	}
+
+#if __OS_HAS_AGP
+	/* XXX */
+	if (dev_priv->flags & RADEON_IS_AGP)
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - dev->agp->base
+						 + dev_priv->gart_vm_start);
+	else
+#endif
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - (unsigned long)dev->sg->vaddr
+						 + dev_priv->gart_vm_start);
+
+	DRM_DEBUG("fb 0x%08x size %d\n",
+		  (unsigned int) dev_priv->fb_location,
+		  (unsigned int) dev_priv->fb_size);
+	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+	DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n",
+		  (unsigned int) dev_priv->gart_vm_start);
+	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
+		  dev_priv->gart_buffers_offset);
+
+	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+	dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+
+	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+	dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* XXX turn off pcie gart */
+	} else
+#endif
+	{
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* fail unless userspace provided a gart offset */
+		if (!dev_priv->pcigart_offset_set) {
+			DRM_ERROR("Need gart offset from userspace\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset);
+
+		dev_priv->gart_info.bus_addr =
+			dev_priv->pcigart_offset + dev_priv->fb_location;
+		dev_priv->gart_info.mapping.offset =
+			dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+		dev_priv->gart_info.mapping.size =
+			dev_priv->gart_info.table_size;
+
+		drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+		if (!dev_priv->gart_info.mapping.handle) {
+			DRM_ERROR("ioremap failed.\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		dev_priv->gart_info.addr =
+			dev_priv->gart_info.mapping.handle;
+
+		DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+			  dev_priv->gart_info.addr,
+			  dev_priv->pcigart_offset);
+
+		if (!r600_page_table_init(dev)) {
+			DRM_ERROR("Failed to init GART table\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+			r700_vm_init(dev);
+		else
+			r600_vm_init(dev);
+	}
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
+		int err = r600_cp_init_microcode(dev_priv);
+		if (err) {
+			DRM_ERROR("Failed to load firmware!\n");
+			r600_do_cleanup_cp(dev);
+			return err;
+		}
+	}
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+		r700_cp_load_microcode(dev_priv);
+	else
+		r600_cp_load_microcode(dev_priv);
+
+	r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->last_buf = 0;
+
+	r600_do_engine_reset(dev);
+	r600_test_writeback(dev_priv);
+
+	return 0;
+}
+
+int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) {
+		r700_vm_init(dev);
+		r700_cp_load_microcode(dev_priv);
+	} else {
+		r600_vm_init(dev);
+		r600_cp_load_microcode(dev_priv);
+	}
+	r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+	r600_do_engine_reset(dev);
+
+	return 0;
+}
+
+/* Wait for the CP to go idle.
+ */
+int r600_do_cp_idle(drm_radeon_private_t *dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* wait for 3D idle clean */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return r600_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+void r600_do_cp_start(drm_radeon_private_t *dev_priv)
+{
+	u32 cp_me;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(7);
+	OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5));
+	OUT_RING(0x00000001);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770))
+		OUT_RING(0x00000003);
+	else
+		OUT_RING(0x00000000);
+	OUT_RING((dev_priv->r600_max_hw_contexts - 1));
+	OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1));
+	OUT_RING(0x00000000);
+	OUT_RING(0x00000000);
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	/* set the mux and reset the halt bit */
+	cp_me = 0xff;
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+	dev_priv->cp_running = 1;
+
+}
+
+void r600_do_cp_reset(drm_radeon_private_t *dev_priv)
+{
+	u32 cur_read_ptr;
+	DRM_DEBUG("\n");
+
+	cur_read_ptr = RADEON_READ(R600_CP_RB_RPTR);
+	RADEON_WRITE(R600_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+}
+
+void r600_do_cp_stop(drm_radeon_private_t *dev_priv)
+{
+	uint32_t cp_me;
+
+	DRM_DEBUG("\n");
+
+	cp_me = 0xff | R600_CP_ME_HALT;
+
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+	dev_priv->cp_running = 0;
+}
+
+int r600_cp_dispatch_indirect(struct drm_device *dev,
+			      struct drm_buf *buf, int start, int end)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	if (start != end) {
+		unsigned long offset = (dev_priv->gart_buffers_offset
+					+ buf->offset + start);
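+		/* Convert the byte range to a dword count, rounding up. */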
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		DRM_DEBUG("dwords:%d\n", dwords);
+		DRM_DEBUG("offset 0x%lx\n", offset);
+
+
+		/* Indirect buffer data must be a multiple of 16 dwords.
+		 * pad the data with a Type-2 CP packet.
+		 */
+		while (dwords & 0xf) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = RADEON_CP_PACKET2;
+		}
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2));
+		OUT_RING((offset & 0xfffffffc));
+		OUT_RING((upper_32_bits(offset) & 0xff));
+		OUT_RING(dwords);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_master *master = file_priv->master;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i, cpp, src_pitch, dst_pitch;
+	uint64_t src, dst;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
+		cpp = 4;
+	else
+		cpp = 2;
+
+	if (sarea_priv->pfCurrentPage == 0) {
+		src_pitch = dev_priv->back_pitch;
+		dst_pitch = dev_priv->front_pitch;
+		src = dev_priv->back_offset + dev_priv->fb_location;
+		dst = dev_priv->front_offset + dev_priv->fb_location;
+	} else {
+		src_pitch = dev_priv->front_pitch;
+		dst_pitch = dev_priv->back_pitch;
+		src = dev_priv->front_offset + dev_priv->fb_location;
+		dst = dev_priv->back_offset + dev_priv->fb_location;
+	}
+
+	if (r600_prepare_blit_copy(dev, file_priv)) {
+		DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+		return;
+	}
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+		r600_blit_swap(dev,
+			       src, dst,
+			       x, y, x, y, w, h,
+			       src_pitch, dst_pitch, cpp);
+	}
+	r600_done_blit_copy(dev);
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	sarea_priv->last_frame++;
+
+	BEGIN_RING(3);
+	R600_FRAME_AGE(sarea_priv->last_frame);
+	ADVANCE_RING();
+}
+
+int r600_cp_dispatch_texture(struct drm_device *dev,
+			     struct drm_file *file_priv,
+			     drm_radeon_texture_t *tex,
+			     drm_radeon_tex_image_t *image)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	u32 *buffer;
+	const u8 __user *data;
+	int size, pass_size;
+	u64 src_offset, dst_offset;
+
+	if (!radeon_check_offset(dev_priv, tex->offset)) {
+		DRM_ERROR("Invalid destination offset\n");
+		return -EINVAL;
+	}
+
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
+	size = tex->height * tex->pitch;
+
+	if (size == 0)
+		return 0;
+
+	dst_offset = tex->offset;
+
+	if (r600_prepare_blit_copy(dev, file_priv)) {
+		DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+		return -EAGAIN;
+	}
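+	/* Upload in passes: each pass stages at most one DMA buffer of
+	 * user data and blits it into the destination texture.
+	 */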
+	do {
+		data = (const u8 __user *)image->data;
+		pass_size = size;
+
+		buf = radeon_freelist_get(dev);
+		if (!buf) {
+			DRM_DEBUG("EAGAIN\n");
+			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+				return -EFAULT;
+			return -EAGAIN;
+		}
+
+		if (pass_size > buf->total)
+			pass_size = buf->total;
+
+		/* Dispatch the indirect buffer.
+		 */
+		buffer =
+		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+		if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+			DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
+			return -EFAULT;
+		}
+
+		buf->file_priv = file_priv;
+		buf->used = pass_size;
+		src_offset = dev_priv->gart_buffers_offset + buf->offset;
+
+		r600_blit_copy(dev, src_offset, dst_offset, pass_size);
+
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+		/* Update the input parameters for next time */
+		image->data = (const u8 __user *)image->data + pass_size;
+		dst_offset += pass_size;
+		size -= pass_size;
+	} while (size > 0);
+	r600_done_blit_copy(dev);
+
+	return 0;
+}
+
+/*
+ * Legacy cs ioctl
+ */
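+/* A cs id packs a 24-bit sequence counter in the low bits and an 8-bit
+ * wrap counter in the high bits, so ids stay monotonically increasing
+ * modulo 2^32 across sequence wraps.
+ */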
+static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
+{
+	/* FIXME: check whether a wrap affects the last reported wrap & sequence */
+	radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
+	if (!radeon->cs_id_scnt) {
+		/* increment wrap counter */
+		radeon->cs_id_wcnt += 0x01000000;
+		/* a valid sequence counter starts at 1 */
+		radeon->cs_id_scnt = 1;
+	}
+	return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
+}
+
+static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
+{
+	RING_LOCALS;
+
+	*id = radeon_cs_id_get(dev_priv);
+
+	/* SCRATCH 2 */
+	BEGIN_RING(3);
+	R600_CLEAR_AGE(*id);
+	ADVANCE_RING();
+	COMMIT_RING();
+}
+
+static int r600_ib_get(struct drm_device *dev,
+			struct drm_file *fpriv,
+			struct drm_buf **buffer)
+{
+	struct drm_buf *buf;
+
+	*buffer = NULL;
+	buf = radeon_freelist_get(dev);
+	if (!buf) {
+		return -EBUSY;
+	}
+	buf->file_priv = fpriv;
+	*buffer = buf;
+	return 0;
+}
+
+static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
+			struct drm_file *fpriv, int l, int r)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (buf) {
+		if (!r)
+			r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
+		radeon_cp_discard_buffer(dev, fpriv->master, buf);
+		COMMIT_RING();
+	}
+}
+
+int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	struct drm_radeon_cs *cs = data;
+	struct drm_buf *buf;
+	unsigned family;
+	int l, r = 0;
+	u32 *ib, cs_id = 0;
+
+	if (dev_priv == NULL) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+	family = dev_priv->flags & RADEON_FAMILY_MASK;
+	if (family < CHIP_R600) {
+		DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
+		return -EINVAL;
+	}
+	sx_xlock(&dev_priv->cs_mutex);
+	/* get ib */
+	l = 0;
+	r = r600_ib_get(dev, fpriv, &buf);
+	if (r) {
+		DRM_ERROR("ib_get failed\n");
+		goto out;
+	}
+	ib = (u32 *)((uintptr_t)dev->agp_buffer_map->handle + buf->offset);
+	/* now parse command stream */
+	r = r600_cs_legacy(dev, data,  fpriv, family, ib, &l);
+	if (r) {
+		goto out;
+	}
+
+out:
+	r600_ib_free(dev, buf, fpriv, l, r);
+	/* emit cs id sequence */
+	r600_cs_id_emit(dev_priv, &cs_id);
+	cs->cs_id = cs_id;
+	sx_xunlock(&dev_priv->cs_mutex);
+	return r;
+}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+
+	*npipes = dev_priv->r600_npipes;
+	*nbanks = dev_priv->r600_nbanks;
+	*group_size = dev_priv->r600_group_size;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_cp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_cp.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_cp.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_cp.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,16 @@
+/* $MidnightBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_cp.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef __R600_CP_H__
+#define	__R600_CP_H__
+
+void	r600_cs_legacy_get_tiling_conf(struct drm_device *dev,
+	    u32 *npipes, u32 *nbanks, u32 *group_size);
+
+int	r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+	    unsigned family, u32 *ib, int *l);
+void	r600_cs_legacy_init(void);
+
+#endif /* !defined(__R600_CP_H__) */


Property changes on: trunk/sys/dev/drm2/radeon/r600_cp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_cs.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_cs.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_cs.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2765 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_cs.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "r600_reg_safe.h"
+#include "r600_cp.h"
+#include "r600_cs.h"
+
+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
+					struct radeon_cs_reloc **cs_reloc);
+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+					struct radeon_cs_reloc **cs_reloc);
+typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
+static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+#endif
+
+
+struct r600_cs_track {
+	/* configuration we mirror so that we can use the same code for KMS and UMS */
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	/* value we track */
+	u32			sq_config;
+	u32			log_nsamples;
+	u32			nsamples;
+	u32			cb_color_base_last[8];
+	struct radeon_bo	*cb_color_bo[8];
+	u64			cb_color_bo_mc[8];
+	u64			cb_color_bo_offset[8];
+	struct radeon_bo	*cb_color_frag_bo[8];
+	u64			cb_color_frag_offset[8];
+	struct radeon_bo	*cb_color_tile_bo[8];
+	u64			cb_color_tile_offset[8];
+	u32			cb_color_mask[8];
+	u32			cb_color_info[8];
+	u32			cb_color_view[8];
+	u32			cb_color_size_idx[8]; /* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask;  /* unused */
+	bool			is_resolve;
+	u32			cb_color_size[8];
+	u32			vgt_strmout_en;
+	u32			vgt_strmout_buffer_en;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u64			vgt_strmout_bo_mc[4]; /* unused */
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_info;
+	u32			db_depth_size_idx;
+	u32			db_depth_view;
+	u32			db_depth_size;
+	u32			db_offset;
+	struct radeon_bo	*db_bo;
+	u64			db_bo_mc;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	struct radeon_bo	*htile_bo;
+	u64			htile_offset;
+	u32			htile_surface;
+};
+
+#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
+
+struct gpu_formats {
+	unsigned blockwidth;
+	unsigned blockheight;
+	unsigned blocksize;
+	unsigned valid_color;
+	enum radeon_family min_family;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+	/* 8 bit */
+	FMT_8_BIT(V_038004_COLOR_8, 1),
+	FMT_8_BIT(V_038004_COLOR_4_4, 1),
+	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+	FMT_8_BIT(V_038004_FMT_1, 0),
+
+	/* 16-bit */
+	FMT_16_BIT(V_038004_COLOR_16, 1),
+	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+	FMT_16_BIT(V_038004_COLOR_8_8, 1),
+	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+	/* 24-bit */
+	FMT_24_BIT(V_038004_FMT_8_8_8),
+
+	/* 32-bit */
+	FMT_32_BIT(V_038004_COLOR_32, 1),
+	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+	/* 48-bit */
+	FMT_48_BIT(V_038004_FMT_16_16_16),
+	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+	/* 64-bit */
+	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+	FMT_96_BIT(V_038004_FMT_32_32_32),
+	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+	/* 128-bit */
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+	/* block compressed formats */
+	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
+	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+
+	/* The other Evergreen formats */
+	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
+};
+
+bool r600_fmt_is_valid_color(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (color_formats_table[format].valid_color)
+		return true;
+
+	return false;
+}
+
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (family < color_formats_table[format].min_family)
+		return false;
+
+	if (color_formats_table[format].blockwidth > 0)
+		return true;
+
+	return false;
+}
+
+int r600_fmt_get_blocksize(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	return color_formats_table[format].blocksize;
+}
+
+int r600_fmt_get_nblocksx(u32 format, u32 w)
+{
+	unsigned bw;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bw = color_formats_table[format].blockwidth;
+	if (bw == 0)
+		return 0;
+
+	return (w + bw - 1) / bw;
+}
+
+int r600_fmt_get_nblocksy(u32 format, u32 h)
+{
+	unsigned bh;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bh = color_formats_table[format].blockheight;
+	if (bh == 0)
+		return 0;
+
+	return (h + bh - 1) / bh;
+}
+
+struct array_mode_checker {
+	int array_mode;
+	u32 group_size;
+	u32 nbanks;
+	u32 npipes;
+	u32 nsamples;
+	u32 blocksize;
+};
+
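+/* R6xx surfaces are built from 8x8-pixel micro-tiles; a macro-tile is
+ * nbanks x npipes micro-tiles.  The alignment rules below follow from
+ * that geometry.
+ */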
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+						u32 *pitch_align,
+						u32 *height_align,
+						u32 *depth_align,
+						u64 *base_align)
+{
+	u32 tile_width = 8;
+	u32 tile_height = 8;
+	u32 macro_tile_width = values->nbanks;
+	u32 macro_tile_height = values->npipes;
+	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
+	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+	switch (values->array_mode) {
+	case ARRAY_LINEAR_GENERAL:
+		/* technically tile_width/_height for pitch/height */
+		*pitch_align = 1; /* tile_width */
+		*height_align = 1; /* tile_height */
+		*depth_align = 1;
+		*base_align = 1;
+		break;
+	case ARRAY_LINEAR_ALIGNED:
+		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
+		*height_align = 1;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_1D_TILED_THIN1:
+		*pitch_align = max((u32)tile_width,
+				   (u32)(values->group_size /
+					 (tile_height * values->blocksize * values->nsamples)));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_2D_TILED_THIN1:
+		*pitch_align = max((u32)macro_tile_width * tile_width,
+				(u32)((values->group_size * values->nbanks) /
+				(values->blocksize * values->nsamples * tile_width)));
+		*height_align = macro_tile_height * tile_height;
+		*depth_align = 1;
+		*base_align = max(macro_tile_bytes,
+				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+	int i;
+
+	/* assume DX9 mode */
+	track->sq_config = DX9_CONSTS;
+	for (i = 0; i < 8; i++) {
+		track->cb_color_base_last[i] = 0;
+		track->cb_color_size[i] = 0;
+		track->cb_color_size_idx[i] = 0;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
+		track->cb_color_frag_bo[i] = NULL;
+		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+		track->cb_color_tile_bo[i] = NULL;
+		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+		track->cb_color_mask[i] = 0xFFFFFFFF;
+	}
+	track->is_resolve = false;
+	track->nsamples = 16;
+	track->log_nsamples = 4;
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+	track->db_bo = NULL;
+	track->db_bo_mc = 0xFFFFFFFF;
+	/* assume the biggest format and that htile is enabled */
+	track->db_depth_info = 7 | (1 << 25);
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_size_idx = 0;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
+
+static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+	struct r600_cs_track *track = p->track;
+	u32 slice_tile_max, size, tmp;
+	u32 height, height_align, pitch, pitch_align, depth_align;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	volatile u32 *ib = p->ib.ptr;
+	unsigned array_mode;
+	u32 format;
+	/* When resolve is used, the second colorbuffer always has 1 sample. */
+	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
+
+	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
+	format = G_0280A0_FORMAT(track->cb_color_info[i]);
+	if (!r600_fmt_is_valid_color(format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+			 __func__, __LINE__, format,
+			i, track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	/* pitch in pixels */
+	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
+	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+	slice_tile_max *= 64;
+	height = slice_tile_max / pitch;
+	if (height > 8192)
+		height = 8192;
+	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+	array_check.array_mode = array_mode;
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = nsamples;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			 track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	switch (array_mode) {
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+		break;
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+		/* avoid breaking userspace */
+		if (height > 7)
+			height &= ~0x7;
+		break;
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		break;
+	default:
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			track->cb_color_info[i]);
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(height, height_align)) {
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i,
+			 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
+		return -EINVAL;
+	}
+
+	/* check offset */
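+	/* bytes for one slice: nblocksy * nblocksx * bytes-per-block * samples */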
+	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+	      r600_fmt_get_blocksize(format) * nsamples;
+	switch (array_mode) {
+	default:
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		tmp += track->cb_color_view[i] & 0xFF;
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
+		break;
+	}
+	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+			/* The initial DDX does bad things with the CB size
+			 * occasionally: it rounds up height too far for the
+			 * slice tile max but the BO is smaller. r600c,g also
+			 * seem to flush at bad times in some apps, resulting
+			 * in bogus values here. So for linear just allow
+			 * anything to avoid breaking broken userspace.
+			 */
+		} else {
+			dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n",
+				 __func__, i, array_mode,
+				 (uintmax_t)track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]),
+				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
+				 r600_fmt_get_nblocksy(format, height),
+				 r600_fmt_get_blocksize(format));
+			return -EINVAL;
+		}
+	}
+	/* limit max tile */
+	tmp = (height * pitch) >> 6;
+	if (tmp < slice_tile_max)
+		slice_tile_max = tmp;
+	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
+		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+	ib[track->cb_color_size_idx[i]] = tmp;
+
+	/* FMASK/CMASK */
+	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+	case V_0280A0_TILE_DISABLE:
+		break;
+	case V_0280A0_FRAG_ENABLE:
+		if (track->nsamples > 1) {
+			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* an 8x8 tile has 64 pixels and the per-pixel FMASK
+			 * size is in units of bits; 64 pixels at 1/8 byte
+			 * per bit leaves a factor of just * 8 for bytes. */
+			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+			if (bytes + track->cb_color_frag_offset[i] >
+			    radeon_bo_size(track->cb_color_frag_bo[i])) {
+				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+					 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
+					 __func__, tile_max, bytes,
+					 (uintmax_t)track->cb_color_frag_offset[i],
+					 radeon_bo_size(track->cb_color_frag_bo[i]));
+				return -EINVAL;
+			}
+		}
+		/* fall through */
+	case V_0280A0_CLEAR_ENABLE:
+	{
+		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels and each 8x8 tile has 4 bits:
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+		uint32_t bytes = (block_max + 1) * 128;
+
+		if (bytes + track->cb_color_tile_offset[i] >
+		    radeon_bo_size(track->cb_color_tile_bo[i])) {
+			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+				 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
+				 __func__, block_max, bytes,
+				 (uintmax_t)track->cb_color_tile_offset[i],
+				 radeon_bo_size(track->cb_color_tile_bo[i]));
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+	u32 height_align, pitch_align, depth_align;
+	u32 pitch = 8192;
+	u32 height = 8192;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	int array_mode;
+	volatile u32 *ib = p->ib.ptr;
+
+	if (track->db_bo == NULL) {
+		dev_warn(p->dev, "z/stencil with no depth buffer\n");
+		return -EINVAL;
+	}
+	switch (G_028010_FORMAT(track->db_depth_info)) {
+	case V_028010_DEPTH_16:
+		bpe = 2;
+		break;
+	case V_028010_DEPTH_X8_24:
+	case V_028010_DEPTH_8_24:
+	case V_028010_DEPTH_X8_24_FLOAT:
+	case V_028010_DEPTH_8_24_FLOAT:
+	case V_028010_DEPTH_32_FLOAT:
+		bpe = 4;
+		break;
+	case V_028010_DEPTH_X24_8_32_FLOAT:
+		bpe = 8;
+		break;
+	default:
+		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+		return -EINVAL;
+	}
+	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+		if (!track->db_depth_size_idx) {
+			dev_warn(p->dev, "z/stencil buffer size not set\n");
+			return -EINVAL;
+		}
+		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+		tmp = (tmp / bpe) >> 6;
+		if (!tmp) {
+			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+					track->db_depth_size, bpe, track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+	} else {
+		size = radeon_bo_size(track->db_bo);
+		/* pitch in pixels */
+		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		slice_tile_max *= 64;
+		height = slice_tile_max / pitch;
+		if (height > 8192)
+			height = 8192;
+		base_offset = track->db_bo_mc + track->db_offset;
+		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+		array_check.array_mode = array_mode;
+		array_check.group_size = track->group_size;
+		array_check.nbanks = track->nbanks;
+		array_check.npipes = track->npipes;
+		array_check.nsamples = track->nsamples;
+		array_check.blocksize = bpe;
+		if (r600_get_array_mode_alignment(&array_check,
+					&pitch_align, &height_align, &depth_align, &base_align)) {
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+		switch (array_mode) {
+		case V_028010_ARRAY_1D_TILED_THIN1:
+			/* don't break userspace */
+			height &= ~0x7;
+			break;
+		case V_028010_ARRAY_2D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+
+		if (!IS_ALIGNED(pitch, pitch_align)) {
+			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, pitch, pitch_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(height, height_align)) {
+			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, height, height_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(base_offset, base_align)) {
+			dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__,
+					(uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
+			return -EINVAL;
+		}
+
+		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
+		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					array_mode,
+					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+	}
+
+	/* hyperz */
+	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+		unsigned long size;
+		unsigned nbx, nby;
+
+		if (track->htile_bo == NULL) {
+			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_info);
+			return -EINVAL;
+		}
+		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_size);
+			return -EINVAL;
+		}
+
+		nbx = pitch;
+		nby = height;
+		if (G_028D24_LINEAR(track->htile_surface)) {
+			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+			nbx = roundup2(nbx, 16 * 8);
+			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+			nby = roundup(nby, track->npipes * 8);
+		} else {
+			/* always assume 8x8 htile */
+			/* align is htile align * 8; htile align varies
+			 * according to the number of pipes, the tile width
+			 * and nby
+			 */
+			switch (track->npipes) {
+			case 8:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = roundup2(nbx, 64 * 8);
+				nby = roundup2(nby, 64 * 8);
+				break;
+			case 4:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = roundup2(nbx, 64 * 8);
+				nby = roundup2(nby, 32 * 8);
+				break;
+			case 2:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = roundup2(nbx, 32 * 8);
+				nby = roundup2(nby, 32 * 8);
+				break;
+			case 1:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = roundup2(nbx, 32 * 8);
+				nby = roundup2(nby, 16 * 8);
+				break;
+			default:
+				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					 __func__, __LINE__, track->npipes);
+				return -EINVAL;
+			}
+		}
+		/* compute number of htiles */
+		nbx = nbx >> 3;
+		nby = nby >> 3;
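+		/* e.g. a 1920x1080 depth buffer on a 2-pipe asic: nbx and nby
+		 * round up to 2048 and 1280 pixels -> 256 x 160 htiles ->
+		 * 256 * 160 * 4 = 163840 bytes, already 4K aligned */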
+		/* size must be aligned on npipes * 2K boundary */
+		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+		size += track->htile_offset;
+
+		if (size > radeon_bo_size(track->htile_bo)) {
+			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
+				 size, nbx, nby);
+			return -EINVAL;
+		}
+	}
+
+	track->db_dirty = false;
+	return 0;
+}
+
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 tmp;
+	int r, i;
+
+	/* on legacy kernels we don't perform the advanced check */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* check streamout */
+	if (track->streamout_dirty && track->vgt_strmout_en) {
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_buffer_en & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+						(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
+							  i, (uintmax_t)offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target; we don't check
+	 * shader_mask because it seems mesa isn't always setting it :(
+	 */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+
+		/* We must check both colorbuffers for RESOLVE. */
+		if (track->is_resolve) {
+			tmp |= 0xff;
+		}
+
+		for (i = 0; i < 8; i++) {
+			if ((tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* perform rewrite of CB_COLOR[0-7]_SIZE */
+				r = r600_cs_track_validate_cb(p, i);
+				if (r)
+					return r;
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	/* Check depth buffer */
+	if (track->db_dirty &&
+	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+	     G_028800_Z_ENABLE(track->db_depth_control))) {
+		r = r600_cs_track_validate_db(p);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p:		parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ * @idx:	index into the ib at which the packet starts
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size or if the packet type is
+ * unknown.
+ **/
+static int r600_cs_packet_parse(struct radeon_cs_parser *p,
+			struct radeon_cs_packet *pkt,
+			unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	uint32_t header;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
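+	/* CP packet header layout (see r600d.h): bits 31:30 hold the type,
+	 * bits 29:16 the payload dword count minus one; for PACKET0 bits
+	 * 15:0 hold the starting register dword offset, for PACKET3 bits
+	 * 15:8 hold the opcode */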
+	header = radeon_get_ib_value(p, idx);
+	pkt->idx = idx;
+	pkt->type = CP_PACKET_GET_TYPE(header);
+	pkt->count = CP_PACKET_GET_COUNT(header);
+	pkt->one_reg_wr = 0;
+	switch (pkt->type) {
+	case PACKET_TYPE0:
+		pkt->reg = CP_PACKET0_GET_REG(header);
+		break;
+	case PACKET_TYPE3:
+		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+		break;
+	case PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		return -EINVAL;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:	where to store the resulting relocation
+ *
+ * Check that the next packet is a relocation packet3 and look up the
+ * corresponding relocation entry (memory-manager path).
+ **/
+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
+					struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return r;
+	}
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		return -EINVAL;
+	}
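+	/* the NOP's first payload dword is an index into the relocation chunk */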
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
+	*cs_reloc = p->relocs_ptr[(idx / 4)];
+	return 0;
+}
+
+/**
+ * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:	where to store the resulting relocation
+ *
+ * Check that the next packet is a relocation packet3 and compute the
+ * GPU offset directly from the relocation chunk data (no memory manager).
+ **/
+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+					struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return r;
+	}
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+	*cs_reloc = p->relocs;
+	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
+	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+	return 0;
+}
+
+/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @p:		parser structure holding parsing context.
+ *
+ * Returns 1 if the next packet is a relocation packet3 (NOP), 0 otherwise.
+ **/
+static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return 0;
+	}
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a WAIT_REG_MEM */
+	if (wait_reg_mem.type != PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		return -EINVAL;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+		return -EINVAL;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		return -EINVAL;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = CP_PACKET0_GET_REG(header);
+
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R600_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			return -EINVAL;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+	}
+
+	return 0;
+}
+
+static int r600_packet0_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt,
+				unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+		r = r600_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = r600_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe
+ * this function will test it against a list of registers needing
+ * special handling.
+ */
+static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+	struct radeon_cs_reloc *reloc;
+	u32 m, i, tmp, *ib;
+	int r;
+
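+	/* r600_reg_safe_bm holds one bit per register dword: reg >> 7
+	 * selects the 32-bit word (each word covers 128 bytes of register
+	 * space) and (reg >> 2) & 31 the bit within it; a clear bit means
+	 * the register is safe as-is, a set bit means it needs the special
+	 * handling below */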
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return 0;
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we would need to understand better how it works to
+	 * perform a proper security check on it (Jerome)
+	 */
+	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+	case R_008C44_SQ_ESGS_RING_SIZE:
+	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+	case R_008C54_SQ_ESTMP_RING_SIZE:
+	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+	case R_008C74_SQ_FBUF_RING_SIZE:
+	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+	case R_008C5C_SQ_GSTMP_RING_SIZE:
+	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+	case R_008C4C_SQ_GSVS_RING_SIZE:
+	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+	case R_008C6C_SQ_PSTMP_RING_SIZE:
+	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+	case R_008C7C_SQ_REDUC_RING_SIZE:
+	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+	case R_008C64_SQ_VSTMP_RING_SIZE:
+	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+		/* get the value to populate the IB; don't remove */
+		tmp = radeon_get_ib_value(p, idx);
+		ib[idx] = 0;
+		break;
+	case SQ_CONFIG:
+		track->sq_config = radeon_get_ib_value(p, idx);
+		break;
+	case R_028800_DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028010_DB_DEPTH_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		    r600_cs_packet_next_is_pkt3_nop(p)) {
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					 "0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+			ib[idx] &= C_028010_ARRAY_MODE;
+			track->db_depth_info &= C_028010_ARRAY_MODE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+			} else {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+		}
+		track->db_dirty = true;
+		break;
+	case R_028004_DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028000_DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_depth_size_idx = idx;
+		track->db_dirty = true;
+		break;
+	case R_028AB0_VGT_STRMOUT_EN:
+		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case R_028B20_VGT_STRMOUT_BUFFER_EN:
+		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case R_028238_CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_02823C_CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		break;
+	case R_028C04_PA_SC_AA_CONFIG:
+		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+		track->log_nsamples = tmp;
+		track->nsamples = 1 << tmp;
+		track->cb_dirty = true;
+		break;
+	case R_028808_CB_COLOR_CONTROL:
+		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+		track->cb_dirty = true;
+		break;
+	case R_0280A0_CB_COLOR0_INFO:
+	case R_0280A4_CB_COLOR1_INFO:
+	case R_0280A8_CB_COLOR2_INFO:
+	case R_0280AC_CB_COLOR3_INFO:
+	case R_0280B0_CB_COLOR4_INFO:
+	case R_0280B4_CB_COLOR5_INFO:
+	case R_0280B8_CB_COLOR6_INFO:
+	case R_0280BC_CB_COLOR7_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		     r600_cs_packet_next_is_pkt3_nop(p)) {
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		}
+		track->cb_dirty = true;
+		break;
+	case R_028080_CB_COLOR0_VIEW:
+	case R_028084_CB_COLOR1_VIEW:
+	case R_028088_CB_COLOR2_VIEW:
+	case R_02808C_CB_COLOR3_VIEW:
+	case R_028090_CB_COLOR4_VIEW:
+	case R_028094_CB_COLOR5_VIEW:
+	case R_028098_CB_COLOR6_VIEW:
+	case R_02809C_CB_COLOR7_VIEW:
+		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_028060_CB_COLOR0_SIZE:
+	case R_028064_CB_COLOR1_SIZE:
+	case R_028068_CB_COLOR2_SIZE:
+	case R_02806C_CB_COLOR3_SIZE:
+	case R_028070_CB_COLOR4_SIZE:
+	case R_028074_CB_COLOR5_SIZE:
+	case R_028078_CB_COLOR6_SIZE:
+	case R_02807C_CB_COLOR7_SIZE:
+		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_size_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+		/* These registers were added late; there is userspace
+		 * which does provide relocations for them but sets a
+		 * 0 offset. In order to avoid breaking old userspace
+		 * we detect this and set the address to point to the
+		 * last CB_COLOR0_BASE. Note that if userspace doesn't
+		 * set CB_COLOR0_BASE before these registers we will
+		 * report an error. Old userspace always set
+		 * CB_COLOR0_BASE before any of these.
+		 */
+	case R_0280E0_CB_COLOR0_FRAG:
+	case R_0280E4_CB_COLOR1_FRAG:
+	case R_0280E8_CB_COLOR2_FRAG:
+	case R_0280EC_CB_COLOR3_FRAG:
+	case R_0280F0_CB_COLOR4_FRAG:
+	case R_0280F4_CB_COLOR5_FRAG:
+	case R_0280F8_CB_COLOR6_FRAG:
+	case R_0280FC_CB_COLOR7_FRAG:
+		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = reloc->robj;
+			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_0280C0_CB_COLOR0_TILE:
+	case R_0280C4_CB_COLOR1_TILE:
+	case R_0280C8_CB_COLOR2_TILE:
+	case R_0280CC_CB_COLOR3_TILE:
+	case R_0280D0_CB_COLOR4_TILE:
+	case R_0280D4_CB_COLOR5_TILE:
+	case R_0280D8_CB_COLOR6_TILE:
+	case R_0280DC_CB_COLOR7_TILE:
+		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = reloc->robj;
+			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_028100_CB_COLOR0_MASK:
+	case R_028104_CB_COLOR1_MASK:
+	case R_028108_CB_COLOR2_MASK:
+	case R_02810C_CB_COLOR3_MASK:
+	case R_028110_CB_COLOR4_MASK:
+	case R_028114_CB_COLOR5_MASK:
+	case R_028118_CB_COLOR6_MASK:
+	case R_02811C_CB_COLOR7_MASK:
+		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 4;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_base_last[tmp] = ib[idx];
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+		track->cb_dirty = true;
+		break;
+	case DB_DEPTH_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_bo = reloc->robj;
+		track->db_bo_mc = reloc->lobj.gpu_offset;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+unsigned r600_mip_minify(unsigned size, unsigned level)
+{
+	unsigned val;
+
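+	/* e.g. size = 100 at level 2: 100 >> 2 = 25, rounded up to the
+	 * next power of two (32), since non-base mip levels are sized
+	 * to powers of two */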
+	val = max(1U, size >> level);
+	if (level > 0)
+		val = roundup_pow_of_two(val);
+	return val;
+}
+
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
+			      unsigned block_align, unsigned height_align, unsigned base_align,
+			      unsigned *l0_size, unsigned *mipmap_size)
+{
+	unsigned offset, i, level;
+	unsigned width, height, depth, size;
+	unsigned blocksize;
+	unsigned nbx, nby;
+	unsigned nlevels = llevel - blevel + 1;
+
+	*l0_size = -1;
+	blocksize = r600_fmt_get_blocksize(format);
+
+	w0 = r600_mip_minify(w0, 0);
+	h0 = r600_mip_minify(h0, 0);
+	d0 = r600_mip_minify(d0, 0);
+	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+		width = r600_mip_minify(w0, i);
+		nbx = r600_fmt_get_nblocksx(format, width);
+
+		nbx = roundup(nbx, block_align);
+
+		height = r600_mip_minify(h0, i);
+		nby = r600_fmt_get_nblocksy(format, height);
+		nby = roundup(nby, height_align);
+
+		depth = r600_mip_minify(d0, i);
+
+		size = nbx * nby * blocksize * nsamples;
+		if (nfaces)
+			size *= nfaces;
+		else
+			size *= depth;
+
+		if (i == 0)
+			*l0_size = size;
+
+		if (i == 0 || i == 1)
+			offset = roundup(offset, base_align);
+
+		offset += size;
+	}
+	*mipmap_size = offset;
+	if (llevel == 0)
+		*mipmap_size = *l0_size;
+	if (!blevel)
+		*mipmap_size -= *l0_size;
+}
+
+/**
+ * r600_check_texture_resource() - check texture resource validity
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+					      struct radeon_bo *texture,
+					      struct radeon_bo *mipmap,
+					      u64 base_offset,
+					      u64 mip_offset,
+					      u32 tiling_flags)
+{
+	struct r600_cs_track *track = p->track;
+	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
+	u32 height_align, pitch, pitch_align, depth_align;
+	u32 barray, larray;
+	u64 base_align;
+	struct array_mode_checker array_check;
+	u32 format;
+	bool is_array;
+
+	/* on legacy kernels we don't perform the advanced check */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* convert to bytes */
+	base_offset <<= 8;
+	mip_offset <<= 8;
+
+	word0 = radeon_get_ib_value(p, idx + 0);
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	}
+	word1 = radeon_get_ib_value(p, idx + 1);
+	word2 = radeon_get_ib_value(p, idx + 2) << 8;
+	word3 = radeon_get_ib_value(p, idx + 3) << 8;
+	word4 = radeon_get_ib_value(p, idx + 4);
+	word5 = radeon_get_ib_value(p, idx + 5);
+	dim = G_038000_DIM(word0);
+	w0 = G_038000_TEX_WIDTH(word0) + 1;
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
+	h0 = G_038004_TEX_HEIGHT(word1) + 1;
+	d0 = G_038004_TEX_DEPTH(word1);
+	format = G_038004_DATA_FORMAT(word1);
+	blevel = G_038010_BASE_LEVEL(word4);
+	llevel = G_038014_LAST_LEVEL(word5);
+	/* pitch in texels */
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	nfaces = 1;
+	is_array = false;
+	switch (dim) {
+	case V_038000_SQ_TEX_DIM_1D:
+	case V_038000_SQ_TEX_DIM_2D:
+	case V_038000_SQ_TEX_DIM_3D:
+		break;
+	case V_038000_SQ_TEX_DIM_CUBEMAP:
+		if (p->family >= CHIP_RV770)
+			nfaces = 8;
+		else
+			nfaces = 6;
+		break;
+	case V_038000_SQ_TEX_DIM_1D_ARRAY:
+	case V_038000_SQ_TEX_DIM_2D_ARRAY:
+		is_array = true;
+		break;
+	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		is_array = true;
+		/* fall through */
+	case V_038000_SQ_TEX_DIM_2D_MSAA:
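+		/* for MSAA resources LAST_LEVEL holds log2(number of samples) */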
+		array_check.nsamples = 1 << llevel;
+		llevel = 0;
+		break;
+	default:
+		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+		return -EINVAL;
+	}
+	if (!r600_fmt_is_valid_texture(format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, format);
+		return -EINVAL;
+	}
+
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+			 __func__, __LINE__, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	/* XXX check height as well... */
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n",
+			 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(mip_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n",
+			 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	if (blevel > llevel) {
+		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
+			 blevel, llevel);
+	}
+	if (is_array) {
+		barray = G_038014_BASE_ARRAY(word5);
+		larray = G_038014_LAST_ARRAY(word5);
+
+		nfaces = larray - barray + 1;
+	}
+	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
+			  pitch_align, height_align, base_align,
+			  &l0_size, &mipmap_size);
+	/* the value read from the ib gives us the offset into the texture bo */
+	if ((l0_size + word2) > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+			 w0, h0, pitch_align, height_align,
+			 array_check.array_mode, format, word2,
+			 l0_size, radeon_bo_size(texture));
+		dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align);
+		return -EINVAL;
+	}
+	/* the value read from the ib gives us the offset into the mipmap bo */
+	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
+		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
+	}
+	return 0;
+}
+
+static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	u32 m, i;
+
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return true;
+	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	return false;
+}
+
+static int r600_packet3_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r600_cs_track *track;
+	volatile u32 *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct r600_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (idx_value & 0xfffffff0) +
+		         ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
+
+	case PACKET3_START_3D_CMDBUF:
+		if (p->family >= CHIP_RV770 || pkt->count) {
+			DRM_ERROR("bad START_3D\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD_BE:
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
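+		/* the low 21 bits of the COMMAND dword hold the byte count */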
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			DRM_ERROR("CP DMA SAS not supported\n");
+			return -EINVAL;
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad CP DMA SRC\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx) +
+				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
+					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx] = offset;
+			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			DRM_ERROR("CP DMA DAS not supported\n");
+			return -EINVAL;
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad CP DMA DST\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx+2) +
+				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
+					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx+2] = offset;
+			ib[idx+3] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	}
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 7) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < (pkt->count / 7); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 size, offset, base_offset, mip_offset;
+
+			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = r600_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+				}
+				texture = reloc->robj;
+				/* tex mip base */
+				r = r600_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				mipmap = reloc->robj;
+				r = r600_check_texture_resource(p,  idx+(i*7)+1,
+								texture, mipmap,
+								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+								reloc->lobj.tiling_flags);
+				if (r)
+					return r;
+				ib[idx+1+(i*7)+2] += base_offset;
+				ib[idx+1+(i*7)+3] += mip_offset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = r600_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+						 size + offset, radeon_bo_size(reloc->robj));
+					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->lobj.gpu_offset + offset;
+				/* each vtx resource is 7 dwords, matching the
+				 * (i*7) stride used for the reads above */
+				ib[idx+1+(i*7)+0] = offset64;
+				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_ALU_CONST:
+		if (track->sq_config & DX9_CONSTS) {
+			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+			end_reg = 4 * pkt->count + start_reg - 4;
+			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+				DRM_ERROR("bad SET_ALU_CONST\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BASE_UPDATE:
+		/* RS780 and RS880 also need this */
+		if (p->family < CHIP_RS780) {
+			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+			return -EINVAL;
+		}
+		if (pkt->count != 1) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+			return -EINVAL;
+		}
+		if (idx_value > 3) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+			return -EINVAL;
+		}
+		{
+			u64 offset;
+
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+				return -EINVAL;
+			}
+
+			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+				return -EINVAL;
+			}
+
+			offset = radeon_get_ib_value(p, idx+1) << 8;
+			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n",
+					  (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]);
+				return -EINVAL;
+			}
+
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_SURFACE_BASE_UPDATE:
+		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
+				  (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->lobj.gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+1))
+				return -EINVAL;
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
+					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+3))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r600_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r600_cs_track *track;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize tracker, we are in kms */
+		track = malloc(sizeof(*track),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (track == NULL)
+			return -ENOMEM;
+		r600_cs_track_init(track);
+		if (p->rdev->family < CHIP_RV770) {
+			track->npipes = p->rdev->config.r600.tiling_npipes;
+			track->nbanks = p->rdev->config.r600.tiling_nbanks;
+			track->group_size = p->rdev->config.r600.tiling_group_size;
+		} else if (p->rdev->family <= CHIP_RV740) {
+			track->npipes = p->rdev->config.rv770.tiling_npipes;
+			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+			track->group_size = p->rdev->config.rv770.tiling_group_size;
+		}
+		p->track = track;
+	}
+	do {
+		r = r600_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
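+		/* Skip past this packet: one header dword plus count+1
+		 * payload dwords (PM4 count fields store N-1). */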
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			r = r600_cs_parse_packet0(p, &pkt);
+			break;
+		case PACKET_TYPE2:
+			break;
+		case PACKET_TYPE3:
+			r = r600_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			free(p->track, DRM_MEM_DRIVER);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	free(p->track, DRM_MEM_DRIVER);
+	p->track = NULL;
+	return 0;
+}
+
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+	if (p->chunk_relocs_idx == -1) {
+		return 0;
+	}
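+	/* The legacy (UMS) path performs no real relocation; allocate a
+	 * single zeroed entry, presumably so that
+	 * r600_cs_packet_next_reloc_nomm() has a valid reloc to hand back. */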
+	p->relocs = malloc(sizeof(struct radeon_cs_reloc),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (p->relocs == NULL) {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * r600_cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, unvalidate the buffer; otherwise just free the memory
+ * used by the parsing context.
+ **/
+static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+	unsigned i;
+
+	free(parser->relocs, DRM_MEM_DRIVER);
+	for (i = 0; i < parser->nchunks; i++) {
+		free(parser->chunks[i].kdata, DRM_MEM_DRIVER);
+		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+			free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER);
+			free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER);
+		}
+	}
+	free(parser->chunks, DRM_MEM_DRIVER);
+	free(parser->chunks_array, DRM_MEM_DRIVER);
+	free(parser->track, DRM_MEM_DRIVER);
+}
+
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+			unsigned family, u32 *ib, int *l)
+{
+	struct radeon_cs_parser parser;
+	struct radeon_cs_chunk *ib_chunk;
+	struct r600_cs_track *track;
+	int r;
+
+	/* initialize tracker */
+	track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (track == NULL)
+		return -ENOMEM;
+	r600_cs_track_init(track);
+	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+	parser.filp = filp;
+	parser.dev = dev->dev;
+	parser.rdev = NULL;
+	parser.family = family;
+	parser.track = track;
+	parser.ib.ptr = ib;
+	r = radeon_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r = r600_cs_parser_relocs_legacy(&parser);
+	if (r) {
+		DRM_ERROR("Failed to parse relocation !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	/* Copy the packet into the IB, the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached). */
+	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+	parser.ib.length_dw = ib_chunk->length_dw;
+	*l = parser.ib.length_dw;
+	r = r600_cs_parse(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r = radeon_cs_finish_pages(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r600_cs_parser_fini(&parser, r);
+	return r;
+}
+
+void r600_cs_legacy_init(void)
+{
+	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
+}
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:		reloc information
+ *
+ * Return the next reloc from the relocation chunk and advance the
+ * DMA reloc index; no buffer validation is done here.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	unsigned idx;
+
+	*cs_reloc = NULL;
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	idx = p->dma_reloc_idx;
+	if (idx >= p->nrelocs) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, p->nrelocs);
+		return -EINVAL;
+	}
+	*cs_reloc = p->relocs_ptr[idx];
+	p->dma_reloc_idx++;
+	return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
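+
+/*
+ * A DMA packet header packs three fields, extracted by the macros above:
+ *   bits 31:28  command (DMA_PACKET_WRITE, DMA_PACKET_COPY, ...)
+ *   bit  23     tiling select (1 = tiled addressing)
+ *   bits 15:0   transfer size in dwords (meaning depends on the command)
+ */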
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc;
+	u32 header, cmd, count, tiled;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 5;
+			} else {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
+					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				/* detile bit */
+				if (idx_value & (1U << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+5);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+5);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				p->idx += 7;
+			} else {
+				if (p->family >= CHIP_RV770) {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					p->idx += 5;
+				} else {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+					p->idx += 4;
+				}
+			}
+			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n",
+					 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n",
+					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			if (p->family < CHIP_RV770) {
+				DRM_ERROR("Constant Fill is 7xx only !\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
+					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_cs.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_cs.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_cs.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_cs.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,12 @@
+/* $MidnightBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_cs.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef __R600_CS_H__
+#define	__R600_CS_H__
+
+int	r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+	    struct radeon_cs_reloc **cs_reloc);
+
+#endif /* !defined(__R600_CS_H__) */


Property changes on: trunk/sys/dev/drm2/radeon/r600_cs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_hdmi.c
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_hdmi.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_hdmi.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,589 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_hdmi.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+	RGB = 0,
+	YCC_422 = 1,
+	YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+	AUDIO_STATUS_DIG_ENABLE   = 0x01,
+	AUDIO_STATUS_V            = 0x02,
+	AUDIO_STATUS_VCFG         = 0x04,
+	AUDIO_STATUS_EMPHASIS     = 0x08,
+	AUDIO_STATUS_COPYRIGHT    = 0x10,
+	AUDIO_STATUS_NONAUDIO     = 0x20,
+	AUDIO_STATUS_PROFESSIONAL = 0x40,
+	AUDIO_STATUS_LEVEL        = 0x80
+};
+
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+    /*	     32kHz	  44.1kHz	48kHz    */
+    /* Clock      N     CTS      N     CTS      N     CTS */
+    {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
+    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
+    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
+    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
+    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
+    {  74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
+    { 148351, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
+    {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
+};
+
+/*
+ * calculate CTS value if it's not found in the table
+ */
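+/*
+ * The HDMI ACR relation is 128*fs = f_TMDS * N / CTS, so with the pixel
+ * clock in kHz the ideal CTS is clock * 1000 * N / (128 * fs).  The helper
+ * below multiplies by 1000 only after the integer division and thus drops
+ * any sub-kHz remainder; e.g. for clock = 25200, N = 4096, fs = 32000 it
+ * computes 25 * 1000 = 25000 rather than the exact 25200, which is why the
+ * predefined table above is preferred whenever it has a matching entry.
+ */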
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+{
+	if (*CTS == 0)
+		*CTS = clock * N / (128 * freq) * 1000;
+	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+		  N, *CTS, freq);
+}
+
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+	struct radeon_hdmi_acr res;
+	u8 i;
+
+	for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+	     r600_hdmi_predefined_acr[i].clock != 0; i++)
+		;
+	res = r600_hdmi_predefined_acr[i];
+
+	/* Fill in any CTS values the table left at zero */
+	r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+	r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+	return res;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the crc for a given info frame
+ */
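+/*
+ * The checksum byte makes the header bytes (type, version, length) plus
+ * all payload bytes sum to zero mod 256, as the HDMI/CEA-861 infoframe
+ * rules require: frame[0] = 0x100 - (sum of the other bytes).
+ */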
+static void r600_hdmi_infoframe_checksum(uint8_t packetType,
+					 uint8_t versionNumber,
+					 uint8_t length,
+					 uint8_t *frame)
+{
+	int i;
+	frame[0] = packetType + versionNumber + length;
+	for (i = 1; i <= length; i++)
+		frame[0] += frame[i];
+	frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void r600_hdmi_videoinfoframe(
+	struct drm_encoder *encoder,
+	enum r600_hdmi_color_format color_format,
+	int active_information_present,
+	uint8_t active_format_aspect_ratio,
+	uint8_t scan_information,
+	uint8_t colorimetry,
+	uint8_t ex_colorimetry,
+	uint8_t quantization,
+	int ITC,
+	uint8_t picture_aspect_ratio,
+	uint8_t video_format_identification,
+	uint8_t pixel_repetition,
+	uint8_t non_uniform_picture_scaling,
+	uint8_t bar_info_data_valid,
+	uint16_t top_bar,
+	uint16_t bottom_bar,
+	uint16_t left_bar,
+	uint16_t right_bar
+)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	uint8_t frame[14];
+
+	frame[0x0] = 0;
+	frame[0x1] =
+		(scan_information & 0x3) |
+		((bar_info_data_valid & 0x3) << 2) |
+		((active_information_present & 0x1) << 4) |
+		((color_format & 0x3) << 5);
+	frame[0x2] =
+		(active_format_aspect_ratio & 0xF) |
+		((picture_aspect_ratio & 0x3) << 4) |
+		((colorimetry & 0x3) << 6);
+	frame[0x3] =
+		(non_uniform_picture_scaling & 0x3) |
+		((quantization & 0x3) << 2) |
+		((ex_colorimetry & 0x7) << 4) |
+		((ITC & 0x1) << 7);
+	frame[0x4] = (video_format_identification & 0x7F);
+	frame[0x5] = (pixel_repetition & 0xF);
+	frame[0x6] = (top_bar & 0xFF);
+	frame[0x7] = (top_bar >> 8);
+	frame[0x8] = (bottom_bar & 0xFF);
+	frame[0x9] = (bottom_bar >> 8);
+	frame[0xA] = (left_bar & 0xFF);
+	frame[0xB] = (left_bar >> 8);
+	frame[0xC] = (right_bar & 0xFF);
+	frame[0xD] = (right_bar >> 8);
+
+	r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright; Intel
+	 * uses the same ones. The checksum function also seems to be OK,
+	 * since it works fine for the audio infoframe, but the calculated
+	 * value is always lower by 2 than the one fglrx produces, which
+	 * breaks display entirely on TVs that strictly check the checksum.
+	 * Hack it manually here to work around the issue. */
+	frame[0x0] += 2;
+
+	WREG32(HDMI0_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(HDMI0_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(HDMI0_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * build a Audio Info Frame
+ */
+static void r600_hdmi_audioinfoframe(
+	struct drm_encoder *encoder,
+	uint8_t channel_count,
+	uint8_t coding_type,
+	uint8_t sample_size,
+	uint8_t sample_frequency,
+	uint8_t format,
+	uint8_t channel_allocation,
+	uint8_t level_shift,
+	int downmix_inhibit
+)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	uint8_t frame[11];
+
+	frame[0x0] = 0;
+	frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
+	frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
+	frame[0x3] = format;
+	frame[0x4] = channel_allocation;
+	frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+	frame[0x6] = 0;
+	frame[0x7] = 0;
+	frame[0x8] = 0;
+	frame[0x9] = 0;
+	frame[0xA] = 0;
+
+	r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+
+	WREG32(HDMI0_AUDIO_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AUDIO_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
+}
+
+/*
+ * have buffer status changed since last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	int status, result;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return 0;
+
+	status = r600_hdmi_is_audio_buffer_filled(encoder);
+	result = dig->afmt->last_buffer_filled_status != status;
+	dig->afmt->last_buffer_filled_status = status;
+
+	return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	bool hdmi_audio_workaround = false; /* FIXME */
+	u32 value;
+
+	if (!hdmi_audio_workaround ||
+	    r600_hdmi_is_audio_buffer_filled(encoder))
+		value = 0; /* disable workaround */
+	else
+		value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		 value, ~HDMI0_AUDIO_TEST_EN);
+}
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	r600_audio_set_clock(encoder, mode->clock);
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND); /* send null packets when required */
+
+	WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	if (ASIC_IS_DCE32(rdev)) {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+		       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	} else {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+		       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	}
+
+	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+	       HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+	       HDMI0_ACR_SOURCE); /* select SW CTS value */
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND | /* send null packets when required */
+	       HDMI0_GC_SEND | /* send general control packets */
+	       HDMI0_GC_CONT); /* send general control packets every frame */
+
+	/* TODO: HDMI0_AUDIO_INFO_UPDATE */
+	WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+	       HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+	WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+	       HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+	r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+	r600_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+	WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+	r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct r600_audio audio = r600_audio_status(rdev);
+	uint32_t offset;
+	uint32_t iec;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+		 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+		  audio.channels, audio.rate, audio.bits_per_sample);
+	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+		  (int)audio.status_bits, (int)audio.category_code);
+
+	iec = 0;
+	if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
+		iec |= 1 << 0;
+	if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
+		iec |= 1 << 1;
+	if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
+		iec |= 1 << 2;
+	if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
+		iec |= 1 << 3;
+
+	iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
+	switch (audio.rate) {
+	case 32000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+		break;
+	case 44100:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+		break;
+	case 48000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+		break;
+	case 88200:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+		break;
+	case 96000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+		break;
+	case 176400:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+		break;
+	case 192000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+		break;
+	}
+
+	WREG32(HDMI0_60958_0 + offset, iec);
+
+	iec = 0;
+	switch (audio.bits_per_sample) {
+	case 16:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+		break;
+	case 20:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+		break;
+	case 24:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+		break;
+	}
+	if (audio.status_bits & AUDIO_STATUS_V)
+		iec |= 0x5 << 16;
+	WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
+
+	r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
+				 0);
+
+	r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * enable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+	u32 hdmi;
+
+	if (ASIC_IS_DCE6(rdev))
+		return;
+
+	/* Already enabled, nothing to do */
+	if (dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	/* Older chipsets require setting HDMI and routing manually */
+	if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+		hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+				 ~AVIVO_TMDSA_CNTL_HDMI_EN);
+			hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+				 ~AVIVO_LVTMA_CNTL_HDMI_EN);
+			hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+			hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+			hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+			break;
+		default:
+			dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+				radeon_encoder->encoder_id);
+			break;
+		}
+		WREG32(HDMI0_CONTROL + offset, hdmi);
+	}
+
+	if (rdev->irq.installed) {
+		/* if irq is available use it */
+		radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+	}
+
+	dig->afmt->enabled = true;
+
+	DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  offset, radeon_encoder->encoder_id);
+}
+
+/*
+ * disable the HDMI engine
+ */
+void r600_hdmi_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+
+	if (ASIC_IS_DCE6(rdev))
+		return;
+
+	/* Called for ATOM_ENCODER_MODE_HDMI only */
+	if (!dig || !dig->afmt) {
+		return;
+	}
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  offset, radeon_encoder->encoder_id);
+
+	/* disable irq */
+	radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+
+	/* Older chipsets not handled by AtomBIOS */
+	if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			WREG32_P(AVIVO_TMDSA_CNTL, 0,
+				 ~AVIVO_TMDSA_CNTL_HDMI_EN);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			WREG32_P(AVIVO_LVTMA_CNTL, 0,
+				 ~AVIVO_LVTMA_CNTL_HDMI_EN);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+			break;
+		default:
+			dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+				radeon_encoder->encoder_id);
+			break;
+		}
+		WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
+	}
+
+	dig->afmt->enabled = false;
+}


Property changes on: trunk/sys/dev/drm2/radeon/r600_hdmi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,178 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_reg.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define R600_PCIE_PORT_INDEX                0x0038
+#define R600_PCIE_PORT_DATA                 0x003c
+
+#define R600_MC_VM_FB_LOCATION			0x2180
+#define		R600_MC_FB_BASE_MASK			0x0000FFFF
+#define		R600_MC_FB_BASE_SHIFT			0
+#define		R600_MC_FB_TOP_MASK			0xFFFF0000
+#define		R600_MC_FB_TOP_SHIFT			16
+#define R600_MC_VM_AGP_TOP			0x2184
+#define		R600_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R600_MC_AGP_TOP_SHIFT			0
+#define R600_MC_VM_AGP_BOT			0x2188
+#define		R600_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R600_MC_AGP_BOT_SHIFT			0
+#define R600_MC_VM_AGP_BASE			0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2190
+#define		R600_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R600_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x2198
+
+#define R700_MC_VM_FB_LOCATION			0x2024
+#define		R700_MC_FB_BASE_MASK			0x0000FFFF
+#define		R700_MC_FB_BASE_SHIFT			0
+#define		R700_MC_FB_TOP_MASK			0xFFFF0000
+#define		R700_MC_FB_TOP_SHIFT			16
+#define R700_MC_VM_AGP_TOP			0x2028
+#define		R700_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R700_MC_AGP_TOP_SHIFT			0
+#define R700_MC_VM_AGP_BOT			0x202c
+#define		R700_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R700_MC_AGP_BOT_SHIFT			0
+#define R700_MC_VM_AGP_BASE			0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2034
+#define		R700_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R700_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x203c
+
+#define R600_RAMCFG				       0x2408
+#       define R600_CHANSIZE                           (1 << 7)
+#       define R600_CHANSIZE_OVERRIDE                  (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT                                        0x618
+#	define R600_OPEN_DRAIN_PADS				   (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE                                     0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL                             0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL                              0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define R600_HDP_NONSURFACE_BASE                                0x2c04
+
+#define R600_BUS_CNTL                                           0x5420
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+#define R600_CONFIG_CNTL                                        0x5424
+#define R600_CONFIG_MEMSIZE                                     0x5428
+#define R600_CONFIG_F0_BASE                                     0x542C
+#define R600_CONFIG_APER_SIZE                                   0x5430
+
+#define	R600_BIF_FB_EN						0x5490
+#define		R600_FB_READ_EN					(1 << 0)
+#define		R600_FB_WRITE_EN				(1 << 1)
+
+#define R600_CITF_CNTL           				0x200c
+#define		R600_BLACKOUT_MASK				0x00000003
+
+#define R700_MC_CITF_CNTL           				0x25c0
+
+#define R600_ROM_CNTL                              0x1600
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL                     0x600
+#       define R600_SPLL_BYPASS_EN                 (1 << 3)
+#define R600_CG_SPLL_STATUS                        0x60c
+#       define R600_SPLL_CHG_STATUS                (1 << 1)
+
+#define R600_BIOS_0_SCRATCH               0x1724
+#define R600_BIOS_1_SCRATCH               0x1728
+#define R600_BIOS_2_SCRATCH               0x172c
+#define R600_BIOS_3_SCRATCH               0x1730
+#define R600_BIOS_4_SCRATCH               0x1734
+#define R600_BIOS_5_SCRATCH               0x1738
+#define R600_BIOS_6_SCRATCH               0x173c
+#define R600_BIOS_7_SCRATCH               0x1740
+
+/* Audio: these regs were reverse engineered,
+ * so there is a good chance the naming is wrong.
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL               0x0514
+#define R600_AUDIO_PLL1_DIV               0x0518
+#define R600_AUDIO_PLL2_MUL               0x0524
+#define R600_AUDIO_PLL2_DIV               0x0528
+#define R600_AUDIO_CLK_SRCSEL             0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE                 0x7300
+#define R600_AUDIO_TIMING                 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID              0x7380
+#define R600_AUDIO_REVISION_ID            0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT        0x7388
+#define R600_AUDIO_NID1_NODE_COUNT        0x738c
+#define R600_AUDIO_NID1_TYPE              0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE    0x7394
+#define R600_AUDIO_SUPPORTED_CODEC        0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS              0x73a0
+#define R600_AUDIO_NID3_CAPS              0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS          0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN          0x73ac
+#define R600_AUDIO_CONN_LIST              0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL       0x73c0
+#define R600_AUDIO_PLAYING                0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID      0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT         0x73cc
+#define R600_AUDIO_PIN_SENSE              0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL        0x73d4
+#define R600_AUDIO_STATUS_BITS            0x73d8
+
+#define DCE2_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1		(0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1		(0x7800 - 0x7400)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r600_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,494 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
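+/*
+ * Per-register safety bitmap for the r6xx command-stream checker, one bit
+ * per register dword.  A set bit appears to flag a register that plain
+ * register writes may not touch; a clear bit marks one considered safe
+ * (see r600_is_safe_reg() in r600_cs.c), hence the mostly 0xFFFFFFFF
+ * contents.
+ */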
+static const unsigned r600_reg_safe_bm[1952] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFEFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFEF, 0xFFFFFFFF, 0xCFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFBD1EFFF, 0xCF3FFFFF, 0xFFFFFFFF,
+	0xFFFFFFDF, 0xFFFFFFFF, 0xFFF0FEEF, 0xEFFFFFFF,
+	0xFFFFFFC1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF7,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFDF, 0xFFFFFFFF, 0xFFFF7FFE, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x00000000, 0xFFFFFFF0, 0xFFFFFFFB, 0xFFFFFFFF,
+	0xFFFFFFFD, 0xFFFFFFFF, 0xFFFFFFDE, 0xFFFFFFFF,
+	0xFFFFAFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF,
+	0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF,
+	0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF,
+	0xFFFC0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFC3FF, 0xFFFFFFFF, 0x0000F0FF, 0x00000000,
+	0x000CE000, 0x00000000, 0xFFD00000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0xFFFF8000,
+	0x0001801F, 0xFC000000, 0xFFFFFFFF, 0xFFFFFE00,
+	0x7BCFFE05, 0xFE07FDEF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xF7E20000, 0xDDDF9CDD, 0xFFFFE3FD, 0xFFFFFFFF,
+	0xFFFB0E02, 0xFFFFFFFF, 0xFFFDC3E7, 0x3FFFFFFF,
+	0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xDFFFFFFF,
+};


Property changes on: trunk/sys/dev/drm2/radeon/r600_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/r600d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/r600d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/r600d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1939 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R600D_H
+#define R600D_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/r600d.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
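+/*
+ * Illustrative note (assumes REG_SET() is the driver's usual
+ * shift-and-mask helper, defined elsewhere): PACKET2(0) evaluates to
+ * 0x80000000, the type-2 no-op packet used to pad the command ring.
+ */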
+
+#define R6XX_MAX_SH_GPRS			256
+#define R6XX_MAX_TEMP_GPRS			16
+#define R6XX_MAX_SH_THREADS			256
+#define R6XX_MAX_SH_STACK_ENTRIES		4096
+#define R6XX_MAX_BACKENDS			8
+#define R6XX_MAX_BACKENDS_MASK			0xff
+#define R6XX_MAX_SIMDS				8
+#define R6XX_MAX_SIMDS_MASK			0xff
+#define R6XX_MAX_PIPES				8
+#define R6XX_MAX_PIPES_MASK			0xff
+
+/* PTE flags */
+/*
+ * FIXME Linux<->FreeBSD: PTE_VALID is already defined by FreeBSD's
+ * PowerPC headers. Fortunately, it is never used in the Radeon driver.
+ */
+/*
+#define PTE_VALID				(1 << 0)
+#define PTE_SYSTEM				(1 << 1)
+#define PTE_SNOOPED				(1 << 2)
+#define PTE_READABLE				(1 << 5)
+#define PTE_WRITEABLE				(1 << 6)
+*/
+
+/* tiling bits */
+#define     ARRAY_LINEAR_GENERAL              0x00000000
+#define     ARRAY_LINEAR_ALIGNED              0x00000001
+#define     ARRAY_1D_TILED_THIN1              0x00000002
+#define     ARRAY_2D_TILED_THIN1              0x00000004
+
+/* Registers */
+#define	ARB_POP						0x2418
+#define 	ENABLE_TC128					(1 << 30)
+#define	ARB_GDEC_RD_CNTL				0x246C
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+
+#define R_028808_CB_COLOR_CONTROL			0x28808
+#define   S_028808_SPECIAL_OP(x)                       (((x) & 0x7) << 4)
+#define   G_028808_SPECIAL_OP(x)                       (((x) >> 4) & 0x7)
+#define   C_028808_SPECIAL_OP                          0xFFFFFF8F
+#define     V_028808_SPECIAL_NORMAL                     0x00
+#define     V_028808_SPECIAL_DISABLE                    0x01
+#define     V_028808_SPECIAL_RESOLVE_BOX                0x07
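+/*
+ * Illustrative sketch, not compiled (the starting value is made up):
+ * the S_/G_/C_ triplets follow the usual set/get/clear-mask
+ * convention, so a read-modify-write of the SPECIAL_OP field is:
+ */
+#if 0
+	u32 v = 0;	/* as read from R_028808_CB_COLOR_CONTROL */
+	v &= C_028808_SPECIAL_OP;	/* clear the old field value */
+	v |= S_028808_SPECIAL_OP(V_028808_SPECIAL_RESOLVE_BOX);
+	/* G_028808_SPECIAL_OP(v) now returns V_028808_SPECIAL_RESOLVE_BOX */
+#endif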
+
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define CB_COLOR0_SIZE                                  0x28060
+#define CB_COLOR0_VIEW                                  0x28080
+#define R_028080_CB_COLOR0_VIEW                      0x028080
+#define   S_028080_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028080_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028080_SLICE_START                         0xFFFFF800
+#define   S_028080_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028080_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028080_SLICE_MAX                           0xFF001FFF
+#define R_028084_CB_COLOR1_VIEW                      0x028084
+#define R_028088_CB_COLOR2_VIEW                      0x028088
+#define R_02808C_CB_COLOR3_VIEW                      0x02808C
+#define R_028090_CB_COLOR4_VIEW                      0x028090
+#define R_028094_CB_COLOR5_VIEW                      0x028094
+#define R_028098_CB_COLOR6_VIEW                      0x028098
+#define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                  (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                      0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
+#define CB_COLOR0_INFO                                  0x280a0
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#	define CB_SOURCE_FORMAT(x)			((x) << 27)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
+#define CB_COLOR0_TILE                                  0x280c0
+#define CB_COLOR0_FRAG                                  0x280e0
+#define CB_COLOR0_MASK                                  0x28100
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+
+#define	CONFIG_MEMSIZE					0x5428
+#define CONFIG_CNTL					0x5424
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define	CP_COHER_BASE					0x85F8
+#define	CP_DEBUG					0xC1FC
+#define	R_0086D8_CP_ME_CNTL			0x86D8
+#define		S_0086D8_CP_ME_HALT(x)			(((x) & 1)<<28)
+#define		C_0086D8_CP_ME_HALT(x)			((x) & 0xEFFFFFFF)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ_END(x)					((x) << 16)
+#define		ROQ_END(x)					((x) << 24)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1U << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_ROQ_IB1_STAT					0x8784
+#define	CP_ROQ_IB2_STAT					0x8788
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG					0x9830
+#define		PREZ_MUST_WAIT_FOR_POSTZ_DONE			(1U << 31)
+#define	DB_DEPTH_BASE					0x2800C
+#define	DB_HTILE_DATA_BASE				0x28014
+#define	DB_HTILE_SURFACE				0x28D24
+#define   S_028D24_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028D24_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028D24_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028D24_HTILE_HEIGHT(x)                      (((x) & 0x1) << 1)
+#define   G_028D24_HTILE_HEIGHT(x)                      (((x) >> 1) & 0x1)
+#define   C_028D24_HTILE_HEIGHT                         0xFFFFFFFD
+#define   G_028D24_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define	DB_WATERMARKS					0x9838
+#define		DEPTH_FREE(x)					((x) << 0)
+#define		DEPTH_FLUSH(x)					((x) << 5)
+#define		DEPTH_PENDING_FREE(x)				((x) << 15)
+#define		DEPTH_CACHELINE_FREE(x)				((x) << 20)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define 	BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define SQ_CONFIG                                         0x8c00
+#       define VC_ENABLE                                  (1 << 0)
+#       define EXPORT_SRC_C                               (1 << 1)
+#       define DX9_CONSTS                                 (1 << 2)
+#       define ALU_INST_PREFER_VECTOR                     (1 << 3)
+#       define DX10_CLAMP                                 (1 << 4)
+#       define CLAUSE_SEQ_PRIO(x)                         ((x) << 8)
+#       define PS_PRIO(x)                                 ((x) << 24)
+#       define VS_PRIO(x)                                 ((x) << 26)
+#       define GS_PRIO(x)                                 ((x) << 28)
+#       define ES_PRIO(x)                                 ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1                            0x8c04
+#       define NUM_PS_GPRS(x)                             ((x) << 0)
+#       define NUM_VS_GPRS(x)                             ((x) << 16)
+#       define NUM_CLAUSE_TEMP_GPRS(x)                    ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2                            0x8c08
+#       define NUM_GS_GPRS(x)                             ((x) << 0)
+#       define NUM_ES_GPRS(x)                             ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT                           0x8c0c
+#       define NUM_PS_THREADS(x)                          ((x) << 0)
+#       define NUM_VS_THREADS(x)                          ((x) << 8)
+#       define NUM_GS_THREADS(x)                          ((x) << 16)
+#       define NUM_ES_THREADS(x)                          ((x) << 24)
+#define SQ_STACK_RESOURCE_MGMT_1                          0x8c10
+#       define NUM_PS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_VS_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2                          0x8c14
+#       define NUM_GS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_ES_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_ESGS_RING_BASE                               0x8c40
+#define SQ_GSVS_RING_BASE                               0x8c48
+#define SQ_ESTMP_RING_BASE                              0x8c50
+#define SQ_GSTMP_RING_BASE                              0x8c58
+#define SQ_VSTMP_RING_BASE                              0x8c60
+#define SQ_PSTMP_RING_BASE                              0x8c68
+#define SQ_FBUF_RING_BASE                               0x8c70
+#define SQ_REDUC_RING_BASE                              0x8c78
+
+#define GRBM_CNTL                                       0x8000
+#       define GRBM_READ_TIMEOUT(x)                     ((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000001F
+#define		GUI_ACTIVE					(1U << 31)
+#define	GRBM_STATUS2					0x8014
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+
+#define	CG_THERMAL_STATUS				0x7F4
+#define		ASIC_T(x)			        ((x) << 0)
+#define		ASIC_T_MASK			        0x1FF
+#define		ASIC_T_SHIFT			        0
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_VM_AGP_TOP					0x2184
+#define MC_VM_AGP_BOT					0x2188
+#define	MC_VM_AGP_BASE					0x218C
+#define MC_VM_FB_LOCATION				0x2180
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL			0x219C
+#define 	ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L1_STRICT_ORDERING			(1 << 2)
+#define		SYSTEM_ACCESS_MODE_MASK				0x000000C0
+#define		SYSTEM_ACCESS_MODE_SHIFT			6
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 6)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 6)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 6)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 6)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 8)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE	(1 << 8)
+#define		ENABLE_SEMAPHORE_MODE				(1 << 10)
+#define		ENABLE_WAIT_L2_QUERY				(1 << 11)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			(((x) & 7) << 12)
+#define		EFFECTIVE_L1_TLB_SIZE_MASK			0x00007000
+#define		EFFECTIVE_L1_TLB_SIZE_SHIFT			12
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		EFFECTIVE_L1_QUEUE_SIZE_MASK			0x00038000
+#define		EFFECTIVE_L1_QUEUE_SIZE_SHIFT			15
+#define MC_VM_L1_TLB_MCD_RD_B_CNTL			0x21A0
+#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL			0x21FC
+#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL			0x2204
+#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL			0x2208
+#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL			0x220C
+#define	MC_VM_L1_TLB_MCB_RD_SYS_CNTL			0x2200
+#define MC_VM_L1_TLB_MCD_WR_A_CNTL			0x21A4
+#define MC_VM_L1_TLB_MCD_WR_B_CNTL			0x21A8
+#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL			0x2210
+#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL			0x2218
+#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL			0x221C
+#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL			0x2220
+#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL			0x2214
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2190
+#define		LOGICAL_PAGE_NUMBER_MASK			0x000FFFFF
+#define		LOGICAL_PAGE_NUMBER_SHIFT			0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2194
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x2198
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define	PA_SC_AA_SAMPLE_LOCS_2S				0x8B40
+#define	PA_SC_AA_SAMPLE_LOCS_4S				0x8B44
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD0			0x8B48
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD1			0x8B4C
+#define		S0_X(x)						((x) << 0)
+#define		S0_Y(x)						((x) << 4)
+#define		S1_X(x)						((x) << 8)
+#define		S1_Y(x)						((x) << 12)
+#define		S2_X(x)						((x) << 16)
+#define		S2_Y(x)						((x) << 20)
+#define		S3_X(x)						((x) << 24)
+#define		S3_Y(x)						((x) << 28)
+#define		S4_X(x)						((x) << 0)
+#define		S4_Y(x)						((x) << 4)
+#define		S5_X(x)						((x) << 8)
+#define		S5_Y(x)						((x) << 12)
+#define		S6_X(x)						((x) << 16)
+#define		S6_Y(x)						((x) << 20)
+#define		S7_X(x)						((x) << 24)
+#define		S7_Y(x)						((x) << 28)
+#define PA_SC_CLIPRECT_RULE				0x2820c
+#define	PA_SC_ENHANCE					0x8BF0
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_TILE_CNT(x)			((x) << 12)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define	PCIE_PORT_INDEX					0x0038
+#define	PCIE_PORT_DATA					0x003C
+
+#define CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+
+#define RAMCFG						0x2408
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000001
+#define		NOOFRANK_SHIFT					1
+#define		NOOFRANK_MASK					0x00000002
+#define		NOOFROWS_SHIFT					2
+#define		NOOFROWS_MASK					0x0000001C
+#define		NOOFCOLS_SHIFT					5
+#define		NOOFCOLS_MASK					0x00000060
+#define		CHANSIZE_SHIFT					7
+#define		CHANSIZE_MASK					0x00000080
+#define		BURSTLENGTH_SHIFT				8
+#define		BURSTLENGTH_MASK				0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 10)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1U<<31)
+#define	SPI_PS_IN_CONTROL_1				0x286D0
+#define		GEN_INDEX_PIX					(1<<0)
+#define		GEN_INDEX_PIX_ADDR(x)				((x)<<1)
+#define		FRONT_FACE_ENA					(1<<8)
+#define		FRONT_FACE_CHAN(x)				((x)<<9)
+#define		FRONT_FACE_ALL_BITS				(1<<11)
+#define		FRONT_FACE_ADDR(x)				((x)<<12)
+#define		FOG_ADDR(x)					((x)<<17)
+#define		FIXED_PT_POSITION_ENA				(1<<24)
+#define		FIXED_PT_POSITION_ADDR(x)			((x)<<25)
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_PGM_START_ES					0x28880
+#define	SQ_PGM_START_FS					0x28894
+#define	SQ_PGM_START_GS					0x2886C
+#define	SQ_PGM_START_PS					0x28840
+#define SQ_PGM_RESOURCES_PS                             0x28850
+#define SQ_PGM_EXPORTS_PS                               0x28854
+#define SQ_PGM_CF_OFFSET_PS                             0x288cc
+#define	SQ_PGM_START_VS					0x28858
+#define SQ_PGM_RESOURCES_VS                             0x28868
+#define SQ_PGM_CF_OFFSET_VS                             0x288d0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000c
+#define	SQ_VTX_CONSTANT_WORD6_0				0x38018
+#define		S__SQ_VTX_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_VTX_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
+
+
+#define	SX_MISC						0x28350
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_DEBUG_1					0x9054
+#define		SMX_EVENT_RELEASE				(1 << 0)
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1U << 31)
+
+#define	TC_CNTL						0x9608
+#define		TC_L2_SIZE(x)					((x)<<5)
+#define		L2_DISABLE_LATE_HIT				(1<<9)
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define	VGT_DMA_BASE					0x287E8
+#define	VGT_DMA_BASE_HI					0x287E4
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_BASE_OFFSET_0			0x28B10
+#define	VGT_STRMOUT_BASE_OFFSET_1			0x28B14
+#define	VGT_STRMOUT_BASE_OFFSET_2			0x28B18
+#define	VGT_STRMOUT_BASE_OFFSET_3			0x28B1c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_0			0x28B44
+#define	VGT_STRMOUT_BASE_OFFSET_HI_1			0x28B48
+#define	VGT_STRMOUT_BASE_OFFSET_HI_2			0x28B4c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_3			0x28B50
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define	VGT_STRMOUT_BUFFER_OFFSET_0			0x28ADC
+#define	VGT_STRMOUT_BUFFER_OFFSET_1			0x28AEC
+#define	VGT_STRMOUT_BUFFER_OFFSET_2			0x28AFC
+#define	VGT_STRMOUT_BUFFER_OFFSET_3			0x28B0C
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT0_INVALIDATION_LOW_ADDR		0x1490
+#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR		0x14B0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x1574
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x1594
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x15B4
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1554
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 13)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT_0(x)				(((x) & 0x1f) << 0)
+#define		BANK_SELECT_1(x)				(((x) & 0x1f) << 5)
+#define		L2_CACHE_UPDATE_MODE(x)				(((x) & 3) << 10)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+#define         WAIT_2D_IDLE_bit                                (1 << 14)
+#define         WAIT_3D_IDLE_bit                                (1 << 15)
+#define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
+#define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
+
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd /* 7xx only */
+#define	DMA_PACKET_NOP					  0xf
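+/*
+ * Illustrative examples (values hypothetical): DMA_PACKET() packs the
+ * 4-bit opcode, the t and s flag bits, and a 16-bit count n into a
+ * single 32-bit packet header, e.g.:
+ *
+ *	DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)    == 0xf0000000
+ *	DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 16) == 0x20000010
+ */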
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1U << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+
+#define RLC_CNTL                                          0x3f00
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB				  0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB				  0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT			  0x3f40
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE                      0x3f10
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
+
+#define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_DMA                             (1 << 12)
+#       define SOFT_RESET_RLC                             (1 << 13)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
+
+#define CP_INT_CNTL                                       0xc124
+#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
+#       define SCRATCH_INT_ENABLE                         (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
+#       define IB2_INT_ENABLE                             (1 << 29)
+#       define IB1_INT_ENABLE                             (1 << 30)
+#       define RB_INT_ENABLE                              (1U << 31)
+#define CP_INT_STATUS                                     0xc128
+#       define SCRATCH_INT_STAT                           (1 << 25)
+#       define TIME_STAMP_INT_STAT                        (1 << 26)
+#       define IB2_INT_STAT                               (1 << 29)
+#       define IB1_INT_STAT                               (1 << 30)
+#       define RB_INT_STAT                                (1U << 31)
+
+#define GRBM_INT_CNTL                                     0x8060
+#       define RDERR_INT_ENABLE                           (1 << 0)
+#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
+#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define D1MODE_VBLANK_STATUS                              0x6534
+#define D2MODE_VBLANK_STATUS                              0x6d34
+#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
+#       define DxMODE_VBLANK_ACK                          (1 << 4)
+#       define DxMODE_VBLANK_STAT                         (1 << 12)
+#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
+#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
+#define D1MODE_VLINE_STATUS                               0x653c
+#define D2MODE_VLINE_STATUS                               0x6d3c
+#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
+#       define DxMODE_VLINE_ACK                           (1 << 4)
+#       define DxMODE_VLINE_STAT                          (1 << 12)
+#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
+#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
+#define DxMODE_INT_MASK                                   0x6540
+#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
+#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
+#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
+#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
+#       define DC_HPD1_INTERRUPT                          (1 << 18)
+#       define DC_HPD2_INTERRUPT                          (1 << 19)
+#define DISP_INTERRUPT_STATUS                             0x7edc
+#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
+#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
+#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
+#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
+#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
+#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
+#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
+#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
+#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
+#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
+#       define DC_HPD4_INTERRUPT                          (1 << 14)
+#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
+#       define DC_HPD3_INTERRUPT                          (1 << 28)
+#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
+#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
+#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
+#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
+#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
+#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
+#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
+#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
+#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
+#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
+#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
+#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
+#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
+#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
+#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
+#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
+#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
+/* DCE 3.2 */
+#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
+#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
+#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
+#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
+#       define DC_HPD5_INTERRUPT                          (1 << 19)
+#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
+#       define DC_HPD6_INTERRUPT                          (1 << 21)
+#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL                          0x7828
+#define DACB_AUTO_DETECT_CONTROL                          0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
+#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
+#       define DACx_AUTODETECT_MODE_NONE                  0
+#       define DACx_AUTODETECT_MODE_CONNECT               1
+#       define DACx_AUTODETECT_MODE_DISCONNECT            2
+#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
+#define DACA_AUTODETECT_INT_CONTROL                       0x7838
+#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
+#       define DACx_AUTODETECT_ACK                        (1 << 0)
+#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
+#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
+#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS                                0x7d00
+#define DC_HPD2_INT_STATUS                                0x7d0c
+#define DC_HPD3_INT_STATUS                                0x7d18
+#define DC_HPD4_INT_STATUS                                0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS                                0x7dc0
+#define DC_HPD6_INT_STATUS                                0x7df4
+#       define DC_HPDx_INT_STATUS                         (1 << 0)
+#       define DC_HPDx_SENSE                              (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
+#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
+#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL                               0x7d04
+#define DC_HPD2_INT_CONTROL                               0x7d10
+#define DC_HPD3_INT_CONTROL                               0x7d1c
+#define DC_HPD4_INT_CONTROL                               0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL                               0x7dc4
+#define DC_HPD6_INT_CONTROL                               0x7df8
+#       define DC_HPDx_INT_ACK                            (1 << 0)
+#       define DC_HPDx_INT_POLARITY                       (1 << 8)
+#       define DC_HPDx_INT_EN                             (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
+#       define DC_HPDx_RX_INT_EN                          (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL                                   0x7d08
+#define DC_HPD2_CONTROL                                   0x7d14
+#define DC_HPD3_CONTROL                                   0x7d20
+#define DC_HPD4_CONTROL                                   0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL                                   0x7dc8
+#define DC_HPD6_CONTROL                                   0x7dfc
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+/* DCE 3.2 */
+#       define DC_HPDx_EN                                 (1 << 28)
+
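+/* Usage sketch (illustrative; WREG32()/RREG32() stand in for the driver's
+ * register accessors): enable HPD pad 1 on a DCE 3.x part and acknowledge
+ * a pending interrupt.  DC_HPDx_EN exists on DCE 3.2 only; the timer
+ * values are the ones the Linux radeon driver uses.
+ *
+ *     WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ *                             DC_HPDx_RX_INT_TIMER(0xfa) |
+ *                             DC_HPDx_EN);
+ *     WREG32(DC_HPD1_INT_CONTROL, DC_HPDx_INT_POLARITY | DC_HPDx_INT_EN);
+ *     ...
+ *     if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_INT_STATUS)
+ *             WREG32(DC_HPD1_INT_CONTROL,
+ *                    RREG32(DC_HPD1_INT_CONTROL) | DC_HPDx_INT_ACK);
+ */
+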
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
+
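+/* Page-flip interrupt sketch (RREG32()/WREG32() assumed): the OCCURRED
+ * bit is tested in the status register and cleared through the same
+ * register with the CLEAR bit.
+ *
+ *     if (RREG32(D1GRPH_INTERRUPT_STATUS) & DxGRPH_PFLIP_INT_OCCURRED)
+ *             WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ */
+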
+/* PCIE link configuration */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#       define LC_POINT_7_PLUS_EN                         (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
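+/* Link-width query sketch.  PCIE_P registers are reached through a
+ * port-indexed accessor (RREG32_PCIE_PORT() in the Linux radeon driver;
+ * assumed here).  Note the encoding is not linear: X16 is 6, not 5.
+ *
+ *     u32 lw = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ *     u32 rd = (lw & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ *     ... rd is one of the LC_LINK_WIDTH_X* values ...
+ */
+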
+/* Audio clocks */
+#define DCCG_AUDIO_DTO0_PHASE             0x0514
+#define DCCG_AUDIO_DTO0_MODULE            0x0518
+#define DCCG_AUDIO_DTO0_LOAD              0x051c
+#       define DTO_LOAD                   (1U << 31)
+#define DCCG_AUDIO_DTO0_CNTL              0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE             0x0524
+#define DCCG_AUDIO_DTO1_MODULE            0x0528
+#define DCCG_AUDIO_DTO1_LOAD              0x052c
+#define DCCG_AUDIO_DTO1_CNTL              0x0530
+
+#define DCCG_AUDIO_DTO_SELECT             0x0534
+
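+/* DTO programming sketch (WREG32() assumed): the audio clock is the
+ * reference clock scaled by the PHASE/MODULE pair, and writing DTO_LOAD
+ * (bit 31) latches the freshly written values.
+ *
+ *     WREG32(DCCG_AUDIO_DTO0_PHASE, phase);
+ *     WREG32(DCCG_AUDIO_DTO0_MODULE, module);
+ *     WREG32(DCCG_AUDIO_DTO0_LOAD, DTO_LOAD);
+ */
+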
+/* digital blocks */
+#define TMDSA_CNTL                       0x7880
+#       define TMDSA_HDMI_EN             (1 << 2)
+#define LVTMA_CNTL                       0x7a80
+#       define LVTMA_HDMI_EN             (1 << 2)
+#define DDIA_CNTL                        0x7200
+#       define DDIA_HDMI_EN              (1 << 2)
+#define DIG0_CNTL                        0x75a0
+#       define DIG_MODE(x)               (((x) & 7) << 8)
+#       define DIG_MODE_DP               0
+#       define DIG_MODE_LVDS             1
+#       define DIG_MODE_TMDS_DVI         2
+#       define DIG_MODE_TMDS_HDMI        3
+#       define DIG_MODE_SDVO             4
+#define DIG1_CNTL                        0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has only
+ * one instance of the blocks while r6xx has two.  DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but they also have two instances.
+ * DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL                0x7400
+/* rs6xx/rs740/r6xx */
+#       define HDMI0_ENABLE          (1 << 0)
+#       define HDMI0_STREAM(x)       (((x) & 3) << 2)
+#       define HDMI0_STREAM_TMDSA    0
+#       define HDMI0_STREAM_LVTMA    1
+#       define HDMI0_STREAM_DVOA     2
+#       define HDMI0_STREAM_DDIA     3
+/* rs6xx/r6xx/dce3 */
+#       define HDMI0_ERROR_ACK       (1 << 8)
+#       define HDMI0_ERROR_MASK      (1 << 9)
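+/* Enable sketch for rs6xx/r6xx (WREG32() assumed): turn the block on and
+ * route it to the TMDSA encoder.
+ *
+ *     WREG32(HDMI0_CONTROL, HDMI0_ENABLE | HDMI0_STREAM(HDMI0_STREAM_TMDSA));
+ */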
+#define HDMI0_STATUS                 0x7404
+#       define HDMI0_ACTIVE_AVMUTE   (1 << 0)
+#       define HDMI0_AUDIO_ENABLE    (1 << 4)
+#       define HDMI0_AZ_FORMAT_WTRIG     (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL   0x7408
+#       define HDMI0_AUDIO_SAMPLE_SEND  (1 << 0)
+#       define HDMI0_AUDIO_DELAY_EN(x)  (((x) & 3) << 4)
+#       define HDMI0_AUDIO_SEND_MAX_PACKETS  (1 << 8)
+#       define HDMI0_AUDIO_TEST_EN         (1 << 12)
+#       define HDMI0_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#       define HDMI0_AUDIO_CHANNEL_SWAP    (1 << 24)
+#       define HDMI0_60958_CS_UPDATE       (1 << 26)
+#       define HDMI0_AZ_FORMAT_WTRIG_MASK  (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_ACK   (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL      0x740c
+#       define HDMI0_AUDIO_CRC_EN    (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL     0x7410
+#       define HDMI0_NULL_SEND       (1 << 0)
+#       define HDMI0_GC_SEND         (1 << 4)
+#       define HDMI0_GC_CONT         (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0     0x7414
+#       define HDMI0_AVI_INFO_SEND   (1 << 0)
+#       define HDMI0_AVI_INFO_CONT   (1 << 1)
+#       define HDMI0_AUDIO_INFO_SEND (1 << 4)
+#       define HDMI0_AUDIO_INFO_CONT (1 << 5)
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+#       define HDMI0_MPEG_INFO_SEND  (1 << 8)
+#       define HDMI0_MPEG_INFO_CONT  (1 << 9)
+#       define HDMI0_MPEG_INFO_UPDATE  (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1     0x7418
+#       define HDMI0_AVI_INFO_LINE(x)  (((x) & 0x3f) << 0)
+#       define HDMI0_AUDIO_INFO_LINE(x)  (((x) & 0x3f) << 8)
+#       define HDMI0_MPEG_INFO_LINE(x)  (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+#       define HDMI0_GENERIC0_SEND   (1 << 0)
+#       define HDMI0_GENERIC0_CONT   (1 << 1)
+#       define HDMI0_GENERIC0_UPDATE (1 << 2)
+#       define HDMI0_GENERIC1_SEND   (1 << 4)
+#       define HDMI0_GENERIC1_CONT   (1 << 5)
+#       define HDMI0_GENERIC0_LINE(x)  (((x) & 0x3f) << 16)
+#       define HDMI0_GENERIC1_LINE(x)  (((x) & 0x3f) << 24)
+#define HDMI0_GC                     0x7428
+#       define HDMI0_GC_AVMUTE       (1 << 0)
+#define HDMI0_AVI_INFO0              0x7454
+#       define HDMI0_AVI_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AVI_INFO_S(x)   (((x) & 3) << 8)
+#       define HDMI0_AVI_INFO_B(x)   (((x) & 3) << 10)
+#       define HDMI0_AVI_INFO_A(x)   (((x) & 1) << 12)
+#       define HDMI0_AVI_INFO_Y(x)   (((x) & 3) << 13)
+#       define HDMI0_AVI_INFO_Y_RGB       0
+#       define HDMI0_AVI_INFO_Y_YCBCR422  1
+#       define HDMI0_AVI_INFO_Y_YCBCR444  2
+#       define HDMI0_AVI_INFO_Y_A_B_S(x)   (((x) & 0xff) << 8)
+#       define HDMI0_AVI_INFO_R(x)   (((x) & 0xf) << 16)
+#       define HDMI0_AVI_INFO_M(x)   (((x) & 0x3) << 20)
+#       define HDMI0_AVI_INFO_C(x)   (((x) & 0x3) << 22)
+#       define HDMI0_AVI_INFO_C_M_R(x)   (((x) & 0xff) << 16)
+#       define HDMI0_AVI_INFO_SC(x)  (((x) & 0x3) << 24)
+#       define HDMI0_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1              0x7458
+#       define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define HDMI0_AVI_INFO_PR(x)  (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2              0x745c
+#       define HDMI0_AVI_INFO_BOTTOM(x)  (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_LEFT(x)    (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3              0x7460
+#       define HDMI0_AVI_INFO_RIGHT(x)    (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_VERSION(x)  (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0             0x7464
+#       define HDMI0_MPEG_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MB0(x)  (((x) & 0xff) << 8)
+#       define HDMI0_MPEG_INFO_MB1(x)  (((x) & 0xff) << 16)
+#       define HDMI0_MPEG_INFO_MB2(x)  (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1             0x7468
+#       define HDMI0_MPEG_INFO_MB3(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MF(x)   (((x) & 3) << 8)
+#       define HDMI0_MPEG_INFO_FR(x)   (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR           0x746c
+#define HDMI0_GENERIC0_0             0x7470
+#define HDMI0_GENERIC0_1             0x7474
+#define HDMI0_GENERIC0_2             0x7478
+#define HDMI0_GENERIC0_3             0x747c
+#define HDMI0_GENERIC0_4             0x7480
+#define HDMI0_GENERIC0_5             0x7484
+#define HDMI0_GENERIC0_6             0x7488
+#define HDMI0_GENERIC1_HDR           0x748c
+#define HDMI0_GENERIC1_0             0x7490
+#define HDMI0_GENERIC1_1             0x7494
+#define HDMI0_GENERIC1_2             0x7498
+#define HDMI0_GENERIC1_3             0x749c
+#define HDMI0_GENERIC1_4             0x74a0
+#define HDMI0_GENERIC1_5             0x74a4
+#define HDMI0_GENERIC1_6             0x74a8
+#define HDMI0_ACR_32_0               0x74ac
+#       define HDMI0_ACR_CTS_32(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1               0x74b0
+#       define HDMI0_ACR_N_32(x)   (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0               0x74b4
+#       define HDMI0_ACR_CTS_44(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1               0x74b8
+#       define HDMI0_ACR_N_44(x)   (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0               0x74bc
+#       define HDMI0_ACR_CTS_48(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1               0x74c0
+#       define HDMI0_ACR_N_48(x)   (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_STATUS_0           0x74c4
+#define HDMI0_ACR_STATUS_1           0x74c8
+#define HDMI0_AUDIO_INFO0            0x74cc
+#       define HDMI0_AUDIO_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_CC(x)  (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1            0x74d0
+#       define HDMI0_AUDIO_INFO_CA(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_LSV(x)  (((x) & 0xf) << 11)
+#       define HDMI0_AUDIO_INFO_DM_INH(x)  (((x) & 1) << 15)
+#       define HDMI0_AUDIO_INFO_DM_INH_LSV(x)  (((x) & 0xff) << 8)
+#define HDMI0_60958_0                0x74d4
+#       define HDMI0_60958_CS_A(x)   (((x) & 1) << 0)
+#       define HDMI0_60958_CS_B(x)   (((x) & 1) << 1)
+#       define HDMI0_60958_CS_C(x)   (((x) & 1) << 2)
+#       define HDMI0_60958_CS_D(x)   (((x) & 3) << 3)
+#       define HDMI0_60958_CS_MODE(x)   (((x) & 3) << 6)
+#       define HDMI0_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define HDMI0_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define HDMI0_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define HDMI0_60958_1                0x74d8
+#       define HDMI0_60958_CS_WORD_LENGTH(x)        (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_VALID_L(x)   (((x) & 1) << 16)
+#       define HDMI0_60958_CS_VALID_R(x)   (((x) & 1) << 18)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL     0x74dc
+#       define HDMI0_ACR_SEND        (1 << 0)
+#       define HDMI0_ACR_CONT        (1 << 1)
+#       define HDMI0_ACR_SELECT(x)   (((x) & 3) << 4)
+#       define HDMI0_ACR_HW          0
+#       define HDMI0_ACR_32          1
+#       define HDMI0_ACR_44          2
+#       define HDMI0_ACR_48          3
+#       define HDMI0_ACR_SOURCE      (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI0_ACR_AUTO_SEND   (1 << 12)
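+/* ACR sketch: for 48 kHz audio the HDMI spec recommends N = 6144 and
+ * CTS = f_TMDS * N / (128 * fs); a 74.25 MHz TMDS clock gives CTS = 74250.
+ * Programming the manual 48 kHz pair and selecting it (WREG32() assumed):
+ *
+ *     WREG32(HDMI0_ACR_48_0, HDMI0_ACR_CTS_48(74250));
+ *     WREG32(HDMI0_ACR_48_1, HDMI0_ACR_N_48(6144));
+ *     WREG32(HDMI0_ACR_PACKET_CONTROL, HDMI0_ACR_SEND | HDMI0_ACR_CONT |
+ *            HDMI0_ACR_SELECT(HDMI0_ACR_48) | HDMI0_ACR_SOURCE);
+ */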
+#define HDMI0_RAMP_CONTROL0          0x74e0
+#       define HDMI0_RAMP_MAX_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1          0x74e4
+#       define HDMI0_RAMP_MIN_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2          0x74e8
+#       define HDMI0_RAMP_INC_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3          0x74ec
+#       define HDMI0_RAMP_DEC_COUNT(x)   (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2                0x74f0
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL                0x7700
+#define HDMI1_STATUS                 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL   0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL                0x7800
+#define DCE3_HDMI1_STATUS                 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL   0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
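+/* PM4 header layout: packet type in bits [31:30], payload dword count
+ * minus one in [29:16], and for type-0 packets the register index (byte
+ * address >> 2) in [15:0].  Sketch of a single-register type-0 write and
+ * its decode:
+ *
+ *     u32 pkt[2];
+ *     pkt[0] = PACKET0(0x5480, 0);   ... header, one payload dword ...
+ *     pkt[1] = 0x1;                  ... value written to reg 0x5480 ...
+ *     ... CP_PACKET_GET_TYPE(pkt[0])  == PACKET_TYPE0
+ *         CP_PACKET_GET_COUNT(pkt[0]) == 0
+ *         CP_PACKET0_GET_REG(pkt[0])  == 0x5480 ...
+ */
+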
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_START_3D_CMDBUF				0x24
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_IMMD_BE			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_INDIRECT_BUFFER_MP			0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#              define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1U << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
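+/* Dword-by-dword sketch of a CP_DMA copy per the layout above; CP_SYNC
+ * delays further packet processing until the copy completes:
+ *
+ *     PACKET3(PACKET3_CP_DMA, 4)
+ *     lower_32_bits(src)
+ *     (upper_32_bits(src) & 0xff) | PACKET3_CP_DMA_CP_SYNC
+ *     lower_32_bits(dst)
+ *     upper_32_bits(dst) & 0xff
+ *     byte_count                    ... COMMAND field left at 0 ...
+ */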
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SMX_ACTION_ENA       (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
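+/* Fence-emission sketch with EVENT_WRITE_EOP: a timestamp event
+ * (EVENT_INDEX(5)) that writes the low 32-bit fence value (DATA_SEL(1))
+ * and interrupts once the write is confirmed (INT_SEL(2)).  The event
+ * type constant lives in the asic code, not in this header:
+ *
+ *     PACKET3(PACKET3_EVENT_WRITE_EOP, 4)
+ *     EVENT_TYPE(ts_event) | EVENT_INDEX(5)
+ *     lower_32_bits(addr)
+ *     (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)
+ *     fence_seq
+ *     0
+ */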
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_OFFSET			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_OFFSET			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+#define		PACKET3_SET_ALU_CONST_OFFSET			0x00030000
+#define		PACKET3_SET_ALU_CONST_END			0x00032000
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_OFFSET			0x0003e380
+#define		PACKET3_SET_BOOL_CONST_END			0x00040000
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_OFFSET			0x0003e200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003e380
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_OFFSET			0x00038000
+#define		PACKET3_SET_RESOURCE_END			0x0003c000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_OFFSET			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003cff0
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_OFFSET			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003e200
+#define	PACKET3_STRMOUT_BASE_UPDATE			0x72 /* r7xx */
+#define	PACKET3_SURFACE_BASE_UPDATE			0x73
+
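+/* The SET_*_REG packets address registers as dword offsets relative to
+ * the block base given above.  Sketch of emitting one context register:
+ *
+ *     PACKET3(PACKET3_SET_CONTEXT_REG, 1)
+ *     (reg - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2
+ *     value
+ */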
+
+#define	R_008020_GRBM_SOFT_RESET		0x8020
+#define		S_008020_SOFT_RESET_CP(x)		(((x) & 1) << 0)
+#define		S_008020_SOFT_RESET_CB(x)		(((x) & 1) << 1)
+#define		S_008020_SOFT_RESET_CR(x)		(((x) & 1) << 2)
+#define		S_008020_SOFT_RESET_DB(x)		(((x) & 1) << 3)
+#define		S_008020_SOFT_RESET_PA(x)		(((x) & 1) << 5)
+#define		S_008020_SOFT_RESET_SC(x)		(((x) & 1) << 6)
+#define		S_008020_SOFT_RESET_SMX(x)		(((x) & 1) << 7)
+#define		S_008020_SOFT_RESET_SPI(x)		(((x) & 1) << 8)
+#define		S_008020_SOFT_RESET_SH(x)		(((x) & 1) << 9)
+#define		S_008020_SOFT_RESET_SX(x)		(((x) & 1) << 10)
+#define		S_008020_SOFT_RESET_TC(x)		(((x) & 1) << 11)
+#define		S_008020_SOFT_RESET_TA(x)		(((x) & 1) << 12)
+#define		S_008020_SOFT_RESET_VC(x)		(((x) & 1) << 13)
+#define		S_008020_SOFT_RESET_VGT(x)		(((x) & 1) << 14)
+#define	R_008010_GRBM_STATUS			0x8010
+#define		S_008010_CMDFIFO_AVAIL(x)		(((x) & 0x1F) << 0)
+#define		S_008010_CP_RQ_PENDING(x)		(((x) & 1) << 6)
+#define		S_008010_CF_RQ_PENDING(x)		(((x) & 1) << 7)
+#define		S_008010_PF_RQ_PENDING(x)		(((x) & 1) << 8)
+#define		S_008010_GRBM_EE_BUSY(x)		(((x) & 1) << 10)
+#define		S_008010_VC_BUSY(x)			(((x) & 1) << 11)
+#define		S_008010_DB03_CLEAN(x)			(((x) & 1) << 12)
+#define		S_008010_CB03_CLEAN(x)			(((x) & 1) << 13)
+#define		S_008010_VGT_BUSY_NO_DMA(x)		(((x) & 1) << 16)
+#define		S_008010_VGT_BUSY(x)			(((x) & 1) << 17)
+#define		S_008010_TA03_BUSY(x)			(((x) & 1) << 18)
+#define		S_008010_TC_BUSY(x)			(((x) & 1) << 19)
+#define		S_008010_SX_BUSY(x)			(((x) & 1) << 20)
+#define		S_008010_SH_BUSY(x)			(((x) & 1) << 21)
+#define		S_008010_SPI03_BUSY(x)			(((x) & 1) << 22)
+#define		S_008010_SMX_BUSY(x)			(((x) & 1) << 23)
+#define		S_008010_SC_BUSY(x)			(((x) & 1) << 24)
+#define		S_008010_PA_BUSY(x)			(((x) & 1) << 25)
+#define		S_008010_DB03_BUSY(x)			(((x) & 1) << 26)
+#define		S_008010_CR_BUSY(x)			(((x) & 1) << 27)
+#define		S_008010_CP_COHERENCY_BUSY(x)		(((x) & 1) << 28)
+#define		S_008010_CP_BUSY(x)			(((x) & 1) << 29)
+#define		S_008010_CB03_BUSY(x)			(((x) & 1) << 30)
+#define		S_008010_GUI_ACTIVE(x)			(((x) & 1) << 31)
+#define		G_008010_CMDFIFO_AVAIL(x)		(((x) >> 0) & 0x1F)
+#define		G_008010_CP_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_008010_CF_RQ_PENDING(x)		(((x) >> 7) & 1)
+#define		G_008010_PF_RQ_PENDING(x)		(((x) >> 8) & 1)
+#define		G_008010_GRBM_EE_BUSY(x)		(((x) >> 10) & 1)
+#define		G_008010_VC_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008010_DB03_CLEAN(x)			(((x) >> 12) & 1)
+#define		G_008010_CB03_CLEAN(x)			(((x) >> 13) & 1)
+#define		G_008010_VGT_BUSY_NO_DMA(x)		(((x) >> 16) & 1)
+#define		G_008010_VGT_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008010_TA03_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008010_TC_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008010_SX_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008010_SH_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008010_SPI03_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008010_SMX_BUSY(x)			(((x) >> 23) & 1)
+#define		G_008010_SC_BUSY(x)			(((x) >> 24) & 1)
+#define		G_008010_PA_BUSY(x)			(((x) >> 25) & 1)
+#define		G_008010_DB03_BUSY(x)			(((x) >> 26) & 1)
+#define		G_008010_CR_BUSY(x)			(((x) >> 27) & 1)
+#define		G_008010_CP_COHERENCY_BUSY(x)		(((x) >> 28) & 1)
+#define		G_008010_CP_BUSY(x)			(((x) >> 29) & 1)
+#define		G_008010_CB03_BUSY(x)			(((x) >> 30) & 1)
+#define		G_008010_GUI_ACTIVE(x)			(((x) >> 31) & 1)
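+/* Idle-wait sketch built on the G_ extractors (RREG32() and a udelay
+ * helper assumed):
+ *
+ *     for (i = 0; i < timeout_usec; i++) {
+ *             if (!G_008010_GUI_ACTIVE(RREG32(R_008010_GRBM_STATUS)))
+ *                     break;
+ *             DRM_UDELAY(1);
+ *     }
+ */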
+#define	R_008014_GRBM_STATUS2			0x8014
+#define		S_008014_CR_CLEAN(x)			(((x) & 1) << 0)
+#define		S_008014_SMX_CLEAN(x)			(((x) & 1) << 1)
+#define		S_008014_SPI0_BUSY(x)			(((x) & 1) << 8)
+#define		S_008014_SPI1_BUSY(x)			(((x) & 1) << 9)
+#define		S_008014_SPI2_BUSY(x)			(((x) & 1) << 10)
+#define		S_008014_SPI3_BUSY(x)			(((x) & 1) << 11)
+#define		S_008014_TA0_BUSY(x)			(((x) & 1) << 12)
+#define		S_008014_TA1_BUSY(x)			(((x) & 1) << 13)
+#define		S_008014_TA2_BUSY(x)			(((x) & 1) << 14)
+#define		S_008014_TA3_BUSY(x)			(((x) & 1) << 15)
+#define		S_008014_DB0_BUSY(x)			(((x) & 1) << 16)
+#define		S_008014_DB1_BUSY(x)			(((x) & 1) << 17)
+#define		S_008014_DB2_BUSY(x)			(((x) & 1) << 18)
+#define		S_008014_DB3_BUSY(x)			(((x) & 1) << 19)
+#define		S_008014_CB0_BUSY(x)			(((x) & 1) << 20)
+#define		S_008014_CB1_BUSY(x)			(((x) & 1) << 21)
+#define		S_008014_CB2_BUSY(x)			(((x) & 1) << 22)
+#define		S_008014_CB3_BUSY(x)			(((x) & 1) << 23)
+#define		G_008014_CR_CLEAN(x)			(((x) >> 0) & 1)
+#define		G_008014_SMX_CLEAN(x)			(((x) >> 1) & 1)
+#define		G_008014_SPI0_BUSY(x)			(((x) >> 8) & 1)
+#define		G_008014_SPI1_BUSY(x)			(((x) >> 9) & 1)
+#define		G_008014_SPI2_BUSY(x)			(((x) >> 10) & 1)
+#define		G_008014_SPI3_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008014_TA0_BUSY(x)			(((x) >> 12) & 1)
+#define		G_008014_TA1_BUSY(x)			(((x) >> 13) & 1)
+#define		G_008014_TA2_BUSY(x)			(((x) >> 14) & 1)
+#define		G_008014_TA3_BUSY(x)			(((x) >> 15) & 1)
+#define		G_008014_DB0_BUSY(x)			(((x) >> 16) & 1)
+#define		G_008014_DB1_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008014_DB2_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008014_DB3_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008014_CB0_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008014_CB1_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008014_CB2_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008014_CB3_BUSY(x)			(((x) >> 23) & 1)
+#define	R_000E50_SRBM_STATUS				0x0E50
+#define		G_000E50_RLC_RQ_PENDING(x)		(((x) >> 3) & 1)
+#define		G_000E50_RCU_RQ_PENDING(x)		(((x) >> 4) & 1)
+#define		G_000E50_GRBM_RQ_PENDING(x)		(((x) >> 5) & 1)
+#define		G_000E50_HI_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_000E50_IO_EXTERN_SIGNAL(x)		(((x) >> 7) & 1)
+#define		G_000E50_VMC_BUSY(x)			(((x) >> 8) & 1)
+#define		G_000E50_MCB_BUSY(x)			(((x) >> 9) & 1)
+#define		G_000E50_MCDZ_BUSY(x)			(((x) >> 10) & 1)
+#define		G_000E50_MCDY_BUSY(x)			(((x) >> 11) & 1)
+#define		G_000E50_MCDX_BUSY(x)			(((x) >> 12) & 1)
+#define		G_000E50_MCDW_BUSY(x)			(((x) >> 13) & 1)
+#define		G_000E50_SEM_BUSY(x)			(((x) >> 14) & 1)
+#define		G_000E50_RLC_BUSY(x)			(((x) >> 15) & 1)
+#define		G_000E50_BIF_BUSY(x)			(((x) >> 29) & 1)
+#define	R_000E60_SRBM_SOFT_RESET			0x0E60
+#define		S_000E60_SOFT_RESET_BIF(x)		(((x) & 1) << 1)
+#define		S_000E60_SOFT_RESET_CG(x)		(((x) & 1) << 2)
+#define		S_000E60_SOFT_RESET_CMC(x)		(((x) & 1) << 3)
+#define		S_000E60_SOFT_RESET_CSC(x)		(((x) & 1) << 4)
+#define		S_000E60_SOFT_RESET_DC(x)		(((x) & 1) << 5)
+#define		S_000E60_SOFT_RESET_GRBM(x)		(((x) & 1) << 8)
+#define		S_000E60_SOFT_RESET_HDP(x)		(((x) & 1) << 9)
+#define		S_000E60_SOFT_RESET_IH(x)		(((x) & 1) << 10)
+#define		S_000E60_SOFT_RESET_MC(x)		(((x) & 1) << 11)
+#define		S_000E60_SOFT_RESET_RLC(x)		(((x) & 1) << 13)
+#define		S_000E60_SOFT_RESET_ROM(x)		(((x) & 1) << 14)
+#define		S_000E60_SOFT_RESET_SEM(x)		(((x) & 1) << 15)
+#define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
+#define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
+
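+/* Soft-reset pulse sketch (WREG32()/RREG32() assumed): set the bit, read
+ * back to post the write, wait, then release:
+ *
+ *     WREG32(R_000E60_SRBM_SOFT_RESET, S_000E60_SOFT_RESET_GRBM(1));
+ *     RREG32(R_000E60_SRBM_SOFT_RESET);
+ *     DRM_UDELAY(50);
+ *     WREG32(R_000E60_SRBM_SOFT_RESET, 0);
+ *     RREG32(R_000E60_SRBM_SOFT_RESET);
+ */
+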
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
+
+#define R_028C04_PA_SC_AA_CONFIG                     0x028C04
+#define   S_028C04_MSAA_NUM_SAMPLES(x)                 (((x) & 0x3) << 0)
+#define   G_028C04_MSAA_NUM_SAMPLES(x)                 (((x) >> 0) & 0x3)
+#define   C_028C04_MSAA_NUM_SAMPLES                    0xFFFFFFFC
+#define   S_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) & 0x1) << 4)
+#define   G_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) >> 4) & 0x1)
+#define   C_028C04_AA_MASK_CENTROID_DTMN               0xFFFFFFEF
+#define   S_028C04_MAX_SAMPLE_DIST(x)                  (((x) & 0xF) << 13)
+#define   G_028C04_MAX_SAMPLE_DIST(x)                  (((x) >> 13) & 0xF)
+#define   C_028C04_MAX_SAMPLE_DIST                     0xFFFE1FFF
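+/* Convention for the S_/G_/C_ macros in this file: S_xxx(v) shifts a
+ * field value into place, G_xxx(r) extracts it from a register value,
+ * and C_xxx is the AND-mask that clears the field.  Read-modify-write
+ * sketch (RREG32()/WREG32() assumed; the field is taken to hold log2 of
+ * the sample count, so 2 means 4x MSAA):
+ *
+ *     u32 v = RREG32(R_028C04_PA_SC_AA_CONFIG);
+ *     v &= C_028C04_MSAA_NUM_SAMPLES;
+ *     v |= S_028C04_MSAA_NUM_SAMPLES(2);
+ *     WREG32(R_028C04_PA_SC_AA_CONFIG, v);
+ */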
+#define R_0280E0_CB_COLOR0_FRAG                      0x0280E0
+#define   S_0280E0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280E0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280E0_BASE_256B                           0x00000000
+#define R_0280E4_CB_COLOR1_FRAG                      0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG                      0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG                      0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG                      0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG                      0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG                      0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG                      0x0280FC
+#define R_0280C0_CB_COLOR0_TILE                      0x0280C0
+#define   S_0280C0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280C0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280C0_BASE_256B                           0x00000000
+#define R_0280C4_CB_COLOR1_TILE                      0x0280C4
+#define R_0280C8_CB_COLOR2_TILE                      0x0280C8
+#define R_0280CC_CB_COLOR3_TILE                      0x0280CC
+#define R_0280D0_CB_COLOR4_TILE                      0x0280D0
+#define R_0280D4_CB_COLOR5_TILE                      0x0280D4
+#define R_0280D8_CB_COLOR6_TILE                      0x0280D8
+#define R_0280DC_CB_COLOR7_TILE                      0x0280DC
+#define R_0280A0_CB_COLOR0_INFO                      0x0280A0
+#define   S_0280A0_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_0280A0_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_0280A0_ENDIAN                              0xFFFFFFFC
+#define   S_0280A0_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_0280A0_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_0280A0_FORMAT                              0xFFFFFF03
+#define     V_0280A0_COLOR_INVALID                     0x00000000
+#define     V_0280A0_COLOR_8                           0x00000001
+#define     V_0280A0_COLOR_4_4                         0x00000002
+#define     V_0280A0_COLOR_3_3_2                       0x00000003
+#define     V_0280A0_COLOR_16                          0x00000005
+#define     V_0280A0_COLOR_16_FLOAT                    0x00000006
+#define     V_0280A0_COLOR_8_8                         0x00000007
+#define     V_0280A0_COLOR_5_6_5                       0x00000008
+#define     V_0280A0_COLOR_6_5_5                       0x00000009
+#define     V_0280A0_COLOR_1_5_5_5                     0x0000000A
+#define     V_0280A0_COLOR_4_4_4_4                     0x0000000B
+#define     V_0280A0_COLOR_5_5_5_1                     0x0000000C
+#define     V_0280A0_COLOR_32                          0x0000000D
+#define     V_0280A0_COLOR_32_FLOAT                    0x0000000E
+#define     V_0280A0_COLOR_16_16                       0x0000000F
+#define     V_0280A0_COLOR_16_16_FLOAT                 0x00000010
+#define     V_0280A0_COLOR_8_24                        0x00000011
+#define     V_0280A0_COLOR_8_24_FLOAT                  0x00000012
+#define     V_0280A0_COLOR_24_8                        0x00000013
+#define     V_0280A0_COLOR_24_8_FLOAT                  0x00000014
+#define     V_0280A0_COLOR_10_11_11                    0x00000015
+#define     V_0280A0_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_0280A0_COLOR_11_11_10                    0x00000017
+#define     V_0280A0_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_0280A0_COLOR_2_10_10_10                  0x00000019
+#define     V_0280A0_COLOR_8_8_8_8                     0x0000001A
+#define     V_0280A0_COLOR_10_10_10_2                  0x0000001B
+#define     V_0280A0_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_0280A0_COLOR_32_32                       0x0000001D
+#define     V_0280A0_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_0280A0_COLOR_16_16_16_16                 0x0000001F
+#define     V_0280A0_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_0280A0_COLOR_32_32_32_32                 0x00000022
+#define     V_0280A0_COLOR_32_32_32_32_FLOAT           0x00000023
+#define   S_0280A0_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_0280A0_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_0280A0_ARRAY_MODE                          0xFFFFF0FF
+#define     V_0280A0_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_0280A0_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_0280A0_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_0280A0_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_0280A0_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_0280A0_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_0280A0_NUMBER_TYPE                         0xFFFF8FFF
+#define   S_0280A0_READ_SIZE(x)                        (((x) & 0x1) << 15)
+#define   G_0280A0_READ_SIZE(x)                        (((x) >> 15) & 0x1)
+#define   C_0280A0_READ_SIZE                           0xFFFF7FFF
+#define   S_0280A0_COMP_SWAP(x)                        (((x) & 0x3) << 16)
+#define   G_0280A0_COMP_SWAP(x)                        (((x) >> 16) & 0x3)
+#define   C_0280A0_COMP_SWAP                           0xFFFCFFFF
+#define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)
+#define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)
+#define   C_0280A0_TILE_MODE                           0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE			0
+#define     V_0280A0_CLEAR_ENABLE			1
+#define     V_0280A0_FRAG_ENABLE			2
+#define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)
+#define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)
+#define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF
+#define   S_0280A0_CLEAR_COLOR(x)                      (((x) & 0x1) << 21)
+#define   G_0280A0_CLEAR_COLOR(x)                      (((x) >> 21) & 0x1)
+#define   C_0280A0_CLEAR_COLOR                         0xFFDFFFFF
+#define   S_0280A0_BLEND_BYPASS(x)                     (((x) & 0x1) << 22)
+#define   G_0280A0_BLEND_BYPASS(x)                     (((x) >> 22) & 0x1)
+#define   C_0280A0_BLEND_BYPASS                        0xFFBFFFFF
+#define   S_0280A0_BLEND_FLOAT32(x)                    (((x) & 0x1) << 23)
+#define   G_0280A0_BLEND_FLOAT32(x)                    (((x) >> 23) & 0x1)
+#define   C_0280A0_BLEND_FLOAT32                       0xFF7FFFFF
+#define   S_0280A0_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 24)
+#define   G_0280A0_SIMPLE_FLOAT(x)                     (((x) >> 24) & 0x1)
+#define   C_0280A0_SIMPLE_FLOAT                        0xFEFFFFFF
+#define   S_0280A0_ROUND_MODE(x)                       (((x) & 0x1) << 25)
+#define   G_0280A0_ROUND_MODE(x)                       (((x) >> 25) & 0x1)
+#define   C_0280A0_ROUND_MODE                          0xFDFFFFFF
+#define   S_0280A0_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_0280A0_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_0280A0_TILE_COMPACT                        0xFBFFFFFF
+#define   S_0280A0_SOURCE_FORMAT(x)                    (((x) & 0x1) << 27)
+#define   G_0280A0_SOURCE_FORMAT(x)                    (((x) >> 27) & 0x1)
+#define   C_0280A0_SOURCE_FORMAT                       0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO                      0x0280A4
+#define R_0280A8_CB_COLOR2_INFO                      0x0280A8
+#define R_0280AC_CB_COLOR3_INFO                      0x0280AC
+#define R_0280B0_CB_COLOR4_INFO                      0x0280B0
+#define R_0280B4_CB_COLOR5_INFO                      0x0280B4
+#define R_0280B8_CB_COLOR6_INFO                      0x0280B8
+#define R_0280BC_CB_COLOR7_INFO                      0x0280BC
+#define R_028060_CB_COLOR0_SIZE                      0x028060
+#define   S_028060_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028060_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028060_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028060_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028060_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028060_SLICE_TILE_MAX                      0xC00003FF
+#define R_028064_CB_COLOR1_SIZE                      0x028064
+#define R_028068_CB_COLOR2_SIZE                      0x028068
+#define R_02806C_CB_COLOR3_SIZE                      0x02806C
+#define R_028070_CB_COLOR4_SIZE                      0x028070
+#define R_028074_CB_COLOR5_SIZE                      0x028074
+#define R_028078_CB_COLOR6_SIZE                      0x028078
+#define R_02807C_CB_COLOR7_SIZE                      0x02807C
+#define R_028238_CB_TARGET_MASK                      0x028238
+#define   S_028238_TARGET0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_028238_TARGET0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_028238_TARGET0_ENABLE                      0xFFFFFFF0
+#define   S_028238_TARGET1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_028238_TARGET1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_028238_TARGET1_ENABLE                      0xFFFFFF0F
+#define   S_028238_TARGET2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_028238_TARGET2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_028238_TARGET2_ENABLE                      0xFFFFF0FF
+#define   S_028238_TARGET3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_028238_TARGET3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_028238_TARGET3_ENABLE                      0xFFFF0FFF
+#define   S_028238_TARGET4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_028238_TARGET4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_028238_TARGET4_ENABLE                      0xFFF0FFFF
+#define   S_028238_TARGET5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_028238_TARGET5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_028238_TARGET5_ENABLE                      0xFF0FFFFF
+#define   S_028238_TARGET6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_028238_TARGET6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_028238_TARGET6_ENABLE                      0xF0FFFFFF
+#define   S_028238_TARGET7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_028238_TARGET7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_028238_TARGET7_ENABLE                      0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK                      0x02823C
+#define   S_02823C_OUTPUT0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_02823C_OUTPUT0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_02823C_OUTPUT0_ENABLE                      0xFFFFFFF0
+#define   S_02823C_OUTPUT1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_02823C_OUTPUT1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_02823C_OUTPUT1_ENABLE                      0xFFFFFF0F
+#define   S_02823C_OUTPUT2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_02823C_OUTPUT2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_02823C_OUTPUT2_ENABLE                      0xFFFFF0FF
+#define   S_02823C_OUTPUT3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_02823C_OUTPUT3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_02823C_OUTPUT3_ENABLE                      0xFFFF0FFF
+#define   S_02823C_OUTPUT4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_02823C_OUTPUT4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_02823C_OUTPUT4_ENABLE                      0xFFF0FFFF
+#define   S_02823C_OUTPUT5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_02823C_OUTPUT5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_02823C_OUTPUT5_ENABLE                      0xFF0FFFFF
+#define   S_02823C_OUTPUT6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_02823C_OUTPUT6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_02823C_OUTPUT6_ENABLE                      0xF0FFFFFF
+#define   S_02823C_OUTPUT7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_02823C_OUTPUT7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_02823C_OUTPUT7_ENABLE                      0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN                      0x028AB0
+#define   S_028AB0_STREAMOUT(x)                        (((x) & 0x1) << 0)
+#define   G_028AB0_STREAMOUT(x)                        (((x) >> 0) & 0x1)
+#define   C_028AB0_STREAMOUT                           0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN               0x028B20
+#define   S_028B20_BUFFER_0_EN(x)                      (((x) & 0x1) << 0)
+#define   G_028B20_BUFFER_0_EN(x)                      (((x) >> 0) & 0x1)
+#define   C_028B20_BUFFER_0_EN                         0xFFFFFFFE
+#define   S_028B20_BUFFER_1_EN(x)                      (((x) & 0x1) << 1)
+#define   G_028B20_BUFFER_1_EN(x)                      (((x) >> 1) & 0x1)
+#define   C_028B20_BUFFER_1_EN                         0xFFFFFFFD
+#define   S_028B20_BUFFER_2_EN(x)                      (((x) & 0x1) << 2)
+#define   G_028B20_BUFFER_2_EN(x)                      (((x) >> 2) & 0x1)
+#define   C_028B20_BUFFER_2_EN                         0xFFFFFFFB
+#define   S_028B20_BUFFER_3_EN(x)                      (((x) & 0x1) << 3)
+#define   G_028B20_BUFFER_3_EN(x)                      (((x) >> 3) & 0x1)
+#define   C_028B20_BUFFER_3_EN                         0xFFFFFFF7
+#define   S_028B20_SIZE(x)                             (((x) & 0xFFFFFFFF) << 0)
+#define   G_028B20_SIZE(x)                             (((x) >> 0) & 0xFFFFFFFF)
+#define   C_028B20_SIZE                                0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0             0x038000
+#define   S_038000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_038000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_038000_DIM                                 0xFFFFFFF8
+#define     V_038000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_038000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_038000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_038000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_038000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_038000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_038000_TILE_MODE(x)                        (((x) & 0xF) << 3)
+#define   G_038000_TILE_MODE(x)                        (((x) >> 3) & 0xF)
+#define   C_038000_TILE_MODE                           0xFFFFFF87
+#define     V_038000_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_038000_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_038000_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_038000_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_038000_TILE_TYPE(x)                        (((x) & 0x1) << 7)
+#define   G_038000_TILE_TYPE(x)                        (((x) >> 7) & 0x1)
+#define   C_038000_TILE_TYPE                           0xFFFFFF7F
+#define   S_038000_PITCH(x)                            (((x) & 0x7FF) << 8)
+#define   G_038000_PITCH(x)                            (((x) >> 8) & 0x7FF)
+#define   C_038000_PITCH                               0xFFF800FF
+#define   S_038000_TEX_WIDTH(x)                        (((x) & 0x1FFF) << 19)
+#define   G_038000_TEX_WIDTH(x)                        (((x) >> 19) & 0x1FFF)
+#define   C_038000_TEX_WIDTH                           0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0             0x038004
+#define   S_038004_TEX_HEIGHT(x)                       (((x) & 0x1FFF) << 0)
+#define   G_038004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x1FFF)
+#define   C_038004_TEX_HEIGHT                          0xFFFFE000
+#define   S_038004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 13)
+#define   G_038004_TEX_DEPTH(x)                        (((x) >> 13) & 0x1FFF)
+#define   C_038004_TEX_DEPTH                           0xFC001FFF
+#define   S_038004_DATA_FORMAT(x)                      (((x) & 0x3F) << 26)
+#define   G_038004_DATA_FORMAT(x)                      (((x) >> 26) & 0x3F)
+#define   C_038004_DATA_FORMAT                         0x03FFFFFF
+#define     V_038004_COLOR_INVALID                     0x00000000
+#define     V_038004_COLOR_8                           0x00000001
+#define     V_038004_COLOR_4_4                         0x00000002
+#define     V_038004_COLOR_3_3_2                       0x00000003
+#define     V_038004_COLOR_16                          0x00000005
+#define     V_038004_COLOR_16_FLOAT                    0x00000006
+#define     V_038004_COLOR_8_8                         0x00000007
+#define     V_038004_COLOR_5_6_5                       0x00000008
+#define     V_038004_COLOR_6_5_5                       0x00000009
+#define     V_038004_COLOR_1_5_5_5                     0x0000000A
+#define     V_038004_COLOR_4_4_4_4                     0x0000000B
+#define     V_038004_COLOR_5_5_5_1                     0x0000000C
+#define     V_038004_COLOR_32                          0x0000000D
+#define     V_038004_COLOR_32_FLOAT                    0x0000000E
+#define     V_038004_COLOR_16_16                       0x0000000F
+#define     V_038004_COLOR_16_16_FLOAT                 0x00000010
+#define     V_038004_COLOR_8_24                        0x00000011
+#define     V_038004_COLOR_8_24_FLOAT                  0x00000012
+#define     V_038004_COLOR_24_8                        0x00000013
+#define     V_038004_COLOR_24_8_FLOAT                  0x00000014
+#define     V_038004_COLOR_10_11_11                    0x00000015
+#define     V_038004_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_038004_COLOR_11_11_10                    0x00000017
+#define     V_038004_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_038004_COLOR_2_10_10_10                  0x00000019
+#define     V_038004_COLOR_8_8_8_8                     0x0000001A
+#define     V_038004_COLOR_10_10_10_2                  0x0000001B
+#define     V_038004_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_038004_COLOR_32_32                       0x0000001D
+#define     V_038004_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_038004_COLOR_16_16_16_16                 0x0000001F
+#define     V_038004_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_038004_COLOR_32_32_32_32                 0x00000022
+#define     V_038004_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_038004_FMT_1                             0x00000025
+#define     V_038004_FMT_GB_GR                         0x00000027
+#define     V_038004_FMT_BG_RG                         0x00000028
+#define     V_038004_FMT_32_AS_8                       0x00000029
+#define     V_038004_FMT_32_AS_8_8                     0x0000002A
+#define     V_038004_FMT_5_9_9_9_SHAREDEXP             0x0000002B
+#define     V_038004_FMT_8_8_8                         0x0000002C
+#define     V_038004_FMT_16_16_16                      0x0000002D
+#define     V_038004_FMT_16_16_16_FLOAT                0x0000002E
+#define     V_038004_FMT_32_32_32                      0x0000002F
+#define     V_038004_FMT_32_32_32_FLOAT                0x00000030
+#define     V_038004_FMT_BC1                           0x00000031
+#define     V_038004_FMT_BC2                           0x00000032
+#define     V_038004_FMT_BC3                           0x00000033
+#define     V_038004_FMT_BC4                           0x00000034
+#define     V_038004_FMT_BC5                           0x00000035
+#define     V_038004_FMT_BC6                           0x00000036
+#define     V_038004_FMT_BC7                           0x00000037
+#define     V_038004_FMT_32_AS_32_32_32_32             0x00000038
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0             0x038010
+#define   S_038010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_038010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_038010_FORMAT_COMP_X                       0xFFFFFFFC
+#define   S_038010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_038010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_038010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_038010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_038010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_038010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_038010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_038010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_038010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_038010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_038010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_038010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define   S_038010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_038010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_038010_SRF_MODE_ALL                        0xFFFFFBFF
+#define   S_038010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_038010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_038010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_038010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_038010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_038010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_038010_REQUEST_SIZE(x)                     (((x) & 0x3) << 14)
+#define   G_038010_REQUEST_SIZE(x)                     (((x) >> 14) & 0x3)
+#define   C_038010_REQUEST_SIZE                        0xFFFF3FFF
+#define   S_038010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_038010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_038010_DST_SEL_X                           0xFFF8FFFF
+#define   S_038010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_038010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_038010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_038010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_038010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_038010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_038010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_038010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_038010_DST_SEL_W                           0xF1FFFFFF
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
+#define   S_038010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_038010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_038010_BASE_LEVEL                          0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0             0x038014
+#define   S_038014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_038014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_038014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_038014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_038014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_038014_BASE_ARRAY                          0xFFFE000F
+#define   S_038014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_038014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_038014_LAST_ARRAY                          0xC001FFFF
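+/* Sketch of the first two texture-resource words for a linear 2D
+ * texture.  Dimensions are programmed minus one and the PITCH field is
+ * taken to be in units of 8 pixels (assumptions worth checking against
+ * the hw docs):
+ *
+ *     word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
+ *             S_038000_TILE_MODE(V_038000_ARRAY_LINEAR_ALIGNED) |
+ *             S_038000_PITCH((pitch / 8) - 1) |
+ *             S_038000_TEX_WIDTH(w - 1);
+ *     word1 = S_038004_TEX_HEIGHT(h - 1) |
+ *             S_038004_TEX_DEPTH(0) |
+ *             S_038004_DATA_FORMAT(V_038004_COLOR_8_8_8_8);
+ */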
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE               0x0288A8
+#define   S_0288A8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288A8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288A8_ITEMSIZE                            0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE                   0x008C44
+#define   S_008C44_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C44_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C44_MEM_SIZE                            0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE              0x0288B0
+#define   S_0288B0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B0_ITEMSIZE                            0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE                  0x008C54
+#define   S_008C54_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C54_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C54_MEM_SIZE                            0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE               0x0288C0
+#define   S_0288C0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C0_ITEMSIZE                            0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE                   0x008C74
+#define   S_008C74_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C74_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C74_MEM_SIZE                            0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE              0x0288B4
+#define   S_0288B4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B4_ITEMSIZE                            0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE                  0x008C5C
+#define   S_008C5C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C5C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C5C_MEM_SIZE                            0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE               0x0288AC
+#define   S_0288AC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288AC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288AC_ITEMSIZE                            0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE                   0x008C4C
+#define   S_008C4C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C4C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C4C_MEM_SIZE                            0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE              0x0288BC
+#define   S_0288BC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288BC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288BC_ITEMSIZE                            0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE                  0x008C6C
+#define   S_008C6C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C6C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C6C_MEM_SIZE                            0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE              0x0288C4
+#define   S_0288C4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C4_ITEMSIZE                            0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE                  0x008C7C
+#define   S_008C7C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C7C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C7C_MEM_SIZE                            0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE              0x0288B8
+#define   S_0288B8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B8_ITEMSIZE                            0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE                  0x008C64
+#define   S_008C64_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C64_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C64_MEM_SIZE                            0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE                 0x0288C8
+#define   S_0288C8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C8_ITEMSIZE                            0xFFFF8000
+#define R_028010_DB_DEPTH_INFO                       0x028010
+#define   S_028010_FORMAT(x)                           (((x) & 0x7) << 0)
+#define   G_028010_FORMAT(x)                           (((x) >> 0) & 0x7)
+#define   C_028010_FORMAT                              0xFFFFFFF8
+#define     V_028010_DEPTH_INVALID                     0x00000000
+#define     V_028010_DEPTH_16                          0x00000001
+#define     V_028010_DEPTH_X8_24                       0x00000002
+#define     V_028010_DEPTH_8_24                        0x00000003
+#define     V_028010_DEPTH_X8_24_FLOAT                 0x00000004
+#define     V_028010_DEPTH_8_24_FLOAT                  0x00000005
+#define     V_028010_DEPTH_32_FLOAT                    0x00000006
+#define     V_028010_DEPTH_X24_8_32_FLOAT              0x00000007
+#define   S_028010_READ_SIZE(x)                        (((x) & 0x1) << 3)
+#define   G_028010_READ_SIZE(x)                        (((x) >> 3) & 0x1)
+#define   C_028010_READ_SIZE                           0xFFFFFFF7
+#define   S_028010_ARRAY_MODE(x)                       (((x) & 0xF) << 15)
+#define   G_028010_ARRAY_MODE(x)                       (((x) >> 15) & 0xF)
+#define   C_028010_ARRAY_MODE                          0xFFF87FFF
+#define     V_028010_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028010_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028010_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 25)
+#define   G_028010_TILE_SURFACE_ENABLE(x)              (((x) >> 25) & 0x1)
+#define   C_028010_TILE_SURFACE_ENABLE                 0xFDFFFFFF
+#define   S_028010_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_028010_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_028010_TILE_COMPACT                        0xFBFFFFFF
+#define   S_028010_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028010_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028010_ZRANGE_PRECISION                    0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE                       0x028000
+#define   S_028000_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028000_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028000_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028000_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028000_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028000_SLICE_TILE_MAX                      0xC00003FF
+#define R_028004_DB_DEPTH_VIEW                       0x028004
+#define   S_028004_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028004_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028004_SLICE_START                         0xFFFFF800
+#define   S_028004_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028004_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028004_SLICE_MAX                           0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
+
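+/*
+ * Usage sketch (illustrative only, not part of the register file): each
+ * register R_* comes with S_* macros that shift a field value into place,
+ * G_* macros that extract it again, and C_* masks that clear it before an
+ * update.  Building and decoding a DB_DEPTH_CONTROL word, for example:
+ *
+ *	uint32_t db = S_028800_Z_ENABLE(1) | S_028800_ZFUNC(4);
+ *	int zfunc = G_028800_ZFUNC(db);				// yields 4
+ *	db = (db & C_028800_ZFUNC) | S_028800_ZFUNC(7);		// replace ZFUNC
+ */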
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/r600d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2064 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+/* TODO: Here are things that need to be done:
+ *	- surface allocator & initializer: (a bit like scratch reg) should
+ *	  initialize the HDP_ registers on RS600, R600, R700 hw, and anything
+ *	  else related to surfaces
+ *	- WB: write back stuff (handle it a bit like the scratch regs)
+ *	- Vblank: look at Jesse's rework and what we should do
+ *	- r600/r700: gart & cp
+ *	- cs: clean up the cs ioctl to use a bitmap & the like
+ *	- power management stuff
+ *	- barrier in gart code
+ *	- unmappable vram?
+ *	- TESTING, TESTING, TESTING
+ */
+
+/* Initialization path:
+ *  We expect that acceleration initialization might fail for various
+ *  reasons even though we work hard to make it work on most
+ *  configurations. In order to still have a working userspace in such a
+ *  situation the init path must succeed up to the memory controller
+ *  initialization point. Failures before this point are considered
+ *  fatal errors. Here is the init call chain:
+ *      radeon_device_init  performs common structure and mutex
+ *                          initialization
+ *      asic_init           sets up the GPU memory layout and performs all
+ *                          one-time initialization (failures in this
+ *                          function are considered fatal)
+ *      asic_startup        sets up GPU acceleration; to follow the
+ *                          guideline, the first thing this function
+ *                          should do is set up the GPU memory controller
+ *                          (only MC setup failures are considered fatal)
+ */
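+
+/* A minimal sketch of what that contract implies for a caller
+ * (illustrative only; error handling is simplified):
+ *
+ *	r = radeon_device_init(rdev, ddev, flags);
+ *	if (r)
+ *		return (r);	// failed before the MC point: fatal
+ *	if (!rdev->accel_working)
+ *		// acceleration init failed after the MC point:
+ *		// modesetting and userspace still work
+ *		DRM_INFO("acceleration disabled\n");
+ */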
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+
+#if defined(CONFIG_ACPI)
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#endif
+
+#include <dev/drm2/ttm/ttm_bo_api.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_execbuf_util.h>
+
+#include "radeon_family.h"
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+/*
+ * Modules parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_testing;
+extern int radeon_connector_table;
+extern int radeon_tv;
+extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
+extern int radeon_msi;
+extern int radeon_lockup_timeout;
+
+/*
+ * Copy from radeon_drv.h so we don't have to include both and have conflicting
+ * symbol;
+ */
+#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
+#define RADEON_IB_POOL_SIZE			16
+#define RADEON_DEBUGFS_MAX_COMPONENTS		32
+#define RADEONFB_CONN_LIMIT			4
+#define RADEON_BIOS_NUM_SCRATCH			8
+
+/* max number of rings */
+#define RADEON_NUM_RINGS			5
+
+/* fence sequence numbers are set to this value when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ		0LL
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX		0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX		1
+#define CAYMAN_RING_TYPE_CP2_INDEX		2
+
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX		3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX		4
+
+/* hardcode these limits for now */
+#define RADEON_VA_IB_OFFSET			(1 << 20)
+#define RADEON_VA_RESERVED_SIZE			(8 << 20)
+#define RADEON_IB_VM_MAX_SIZE			(64 << 10)
+
+/* reset flags */
+#define RADEON_RESET_GFX			(1 << 0)
+#define RADEON_RESET_COMPUTE			(1 << 1)
+#define RADEON_RESET_DMA			(1 << 2)
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+	CHIP_ERRATA_R300_CG             = 0x00000001,
+	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
+	CHIP_ERRATA_PLL_DELAY           = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Dummy page
+ */
+struct radeon_dummy_page {
+	drm_dma_handle_t *dmah;
+	dma_addr_t	addr;
+};
+int radeon_dummy_page_init(struct radeon_device *rdev);
+void radeon_dummy_page_fini(struct radeon_device *rdev);
+
+
+/*
+ * Clocks
+ */
+struct radeon_clock {
+	struct radeon_pll p1pll;
+	struct radeon_pll p2pll;
+	struct radeon_pll dcpll;
+	struct radeon_pll spll;
+	struct radeon_pll mpll;
+	/* 10 kHz units */
+	uint32_t default_mclk;
+	uint32_t default_sclk;
+	uint32_t default_dispclk;
+	uint32_t dp_extclk;
+	uint32_t max_pixel_clock;
+};
+
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+void rs690_pm_info(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
+extern int si_get_temp(struct radeon_device *rdev);
+extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+				    unsigned *bankh, unsigned *mtaspect,
+				    unsigned *tile_split);
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+	uint32_t			scratch_reg;
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint64_t			sync_seq[RADEON_NUM_RINGS];
+	atomic64_t			last_seq;
+	unsigned long			last_activity;
+	bool				initialized;
+};
+
+struct radeon_fence {
+	struct radeon_device		*rdev;
+	unsigned int			kref;
+	/* protected by radeon_fence.lock */
+	uint64_t			seq;
+	/* RB, DMA, etc. */
+	unsigned			ring;
+};
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
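+
+/* Typical fence lifecycle sketch (illustrative only):
+ *
+ *	struct radeon_fence *fence;
+ *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ *	if (r == 0) {
+ *		r = radeon_fence_wait(fence, false);	// non-interruptible
+ *		radeon_fence_unref(&fence);		// drops pointer to NULL
+ *	}
+ */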
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+						      struct radeon_fence *b)
+{
+	if (!a) {
+		return b;
+	}
+
+	if (!b) {
+		return a;
+	}
+
+	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belong to different rings"));
+
+	if (a->seq > b->seq) {
+		return a;
+	} else {
+		return b;
+	}
+}
+
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+					   struct radeon_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belong to different rings"));
+
+	return a->seq < b->seq;
+}
+
+/*
+ * Tiling registers
+ */
+struct radeon_surface_reg {
+	struct radeon_bo *bo;
+};
+
+#define RADEON_GEM_MAX_SURFACES 8
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+};
+
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+	uint64_t			soffset;
+	uint64_t			eoffset;
+	uint32_t			flags;
+	bool				valid;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct list_head		vm_list;
+
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	/* list of all virtual addresses with which this bo
+	 * is associated
+	 */
+	struct list_head		va;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
+};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+
+struct radeon_bo_list {
+	struct ttm_validate_buffer tv;
+	struct radeon_bo	*bo;
+	uint64_t		gpu_offset;
+	unsigned		rdomain;
+	unsigned		wdomain;
+	u32			tiling_flags;
+};
+
+/* sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, like the
+ * indirect buffers or semaphores, which both have their own locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub-object in turn until we reach object_offset + object_size >=
+ * alloc_size; that object then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be holes (all objects use the same
+ * alignment).
+ */
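+/*
+ * Worked example (illustrative): with size == 1024 and the last object at
+ * offset 768 with size 128, the free tail is 1024 - (768 + 128) == 128
+ * bytes, so a 96-byte request fits immediately, while a 256-byte request
+ * has to wait for earlier sub-objects to retire first.
+ */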
+struct radeon_sa_manager {
+	struct cv		wq;
+	struct sx		wq_lock;
+	struct radeon_bo	*bo;
+	struct list_head	*hole;
+	struct list_head	flist[RADEON_NUM_RINGS];
+	struct list_head	olist;
+	unsigned		size;
+	uint64_t		gpu_addr;
+	void			*cpu_ptr;
+	uint32_t		domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+	struct list_head		olist;
+	struct list_head		flist;
+	struct radeon_sa_manager	*manager;
+	unsigned			soffset;
+	unsigned			eoffset;
+	struct radeon_fence		*fence;
+};
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+	struct sx		mutex;
+	struct list_head	objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj);
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle);
+
+/*
+ * Semaphores.
+ */
+/* everything here is constant */
+struct radeon_semaphore {
+	struct radeon_sa_bo		*sa_bo;
+	signed				waiters;
+	uint64_t			gpu_addr;
+};
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+				  struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+				struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				int signaler, int waiter);
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence);
+
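+/* Cross-ring synchronization sketch (illustrative only): the waiter ring
+ * stalls until the signaler ring signals the same semaphore.
+ *
+ *	struct radeon_semaphore *sem;
+ *	r = radeon_semaphore_create(rdev, &sem);
+ *	if (r == 0) {
+ *		radeon_semaphore_emit_signal(rdev, signaler, sem);
+ *		radeon_semaphore_emit_wait(rdev, waiter, sem);
+ *		radeon_semaphore_free(rdev, &sem, fence); // fence guards reuse
+ *	}
+ */
+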
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
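+/* e.g. RADEON_GPU_PAGE_ALIGN(5000) == 8192: add the 4095 page mask, then
+ * clear the low 12 bits, i.e. (5000 + 4095) & ~4095 */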
+
+struct radeon_gart {
+	drm_dma_handle_t		*dmah;
+	dma_addr_t			table_addr;
+	struct radeon_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+	vm_page_t			*pages;
+	dma_addr_t			*pages_addr;
+	bool				ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, vm_page_t *pagelist,
+		     dma_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+	resource_size_t		aper_size;
+	resource_size_t		aper_base;
+	resource_size_t		agp_base;
+	/* for some chips with <= 32MB we need to lie
+	 * about vram size near mc fb location */
+	u64			mc_vram_size;
+	u64			visible_vram_size;
+	u64			gtt_size;
+	u64			gtt_start;
+	u64			gtt_end;
+	u64			vram_start;
+	u64			vram_end;
+	unsigned		vram_width;
+	u64			real_vram_size;
+	int			vram_mtrr;
+	bool			vram_is_ddr;
+	bool			igp_sideport_enabled;
+	u64                     gtt_base_align;
+};
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+	unsigned		num_reg;
+	uint32_t                reg_base;
+	bool			free[32];
+	uint32_t		reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
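+
+/* Sketch of the classic scratch-register ring test (illustrative; the real
+ * tests live in the per-ASIC code):
+ *
+ *	uint32_t reg, tmp;
+ *	if (radeon_scratch_get(rdev, &reg) == 0) {
+ *		WREG32(reg, 0xCAFEDEAD);   // marker the GPU should overwrite
+ *		// ... emit and wait for a packet that writes `reg' ...
+ *		tmp = RREG32(reg);         // readback tells pass/fail
+ *		radeon_scratch_free(rdev, reg);
+ *	}
+ */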
+
+
+/*
+ * IRQS.
+ */
+
+struct radeon_unpin_work {
+	struct task work;
+	struct radeon_device *rdev;
+	int crtc_id;
+	struct radeon_fence *fence;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo *old_rbo;
+	u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+	u32 hdmi0_status;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 hdmi0_status;
+	u32 hdmi1_status;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+	u32 afmt_status1;
+	u32 afmt_status2;
+	u32 afmt_status3;
+	u32 afmt_status4;
+	u32 afmt_status5;
+	u32 afmt_status6;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+};
+
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 6
+
+struct radeon_irq {
+	bool				installed;
+	struct mtx			lock;
+	atomic_t			ring_int[RADEON_NUM_RINGS];
+	bool				crtc_vblank_int[RADEON_MAX_CRTCS];
+	atomic_t			pflip[RADEON_MAX_CRTCS];
+	wait_queue_head_t		vblank_queue;
+	bool				hpd[RADEON_MAX_HPD_PINS];
+	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
+	union radeon_irq_stat_regs	stat_regs;
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+
+/*
+ * CP & rings.
+ */
+
+struct radeon_ib {
+	struct radeon_sa_bo		*sa_bo;
+	uint32_t			length_dw;
+	uint64_t			gpu_addr;
+	uint32_t			*ptr;
+	int				ring;
+	struct radeon_fence		*fence;
+	struct radeon_vm		*vm;
+	bool				is_const_ib;
+	struct radeon_fence		*sync_to[RADEON_NUM_RINGS];
+	struct radeon_semaphore		*semaphore;
+};
+
+struct radeon_ring {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		rptr_offs;
+	unsigned		rptr_reg;
+	unsigned		rptr_save_reg;
+	u64			next_rptr_gpu_addr;
+	volatile u32		*next_rptr_cpu_addr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		wptr_reg;
+	unsigned		ring_size;
+	unsigned		ring_free_dw;
+	int			count_dw;
+	unsigned long		last_activity;
+	unsigned		last_rptr;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			ptr_reg_shift;
+	u32			ptr_reg_mask;
+	u32			nop;
+	u32			idx;
+	u64			last_semaphore_signal_addr;
+	u64			last_semaphore_wait_addr;
+};
+
+/*
+ * VM
+ */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM	16
+
+/* defines the number of bits in the page table versus the page directory;
+ * a page is 4KB so we have a 12-bit offset, 9 bits in the page
+ * table and the remaining 19 bits in the page directory */
+#define RADEON_VM_BLOCK_SIZE   9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
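+
+/* Illustrative decomposition of a GPU virtual address under this split
+ * (the local variable names are hypothetical):
+ *
+ *	pd_index = va >> (12 + RADEON_VM_BLOCK_SIZE);		// bits 21..
+ *	pt_index = (va >> 12) & (RADEON_VM_PTE_COUNT - 1);	// bits 12..20
+ *	offset   = va & 0xFFF;					// bits  0..11
+ */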
+
+struct radeon_vm {
+	struct list_head		list;
+	struct list_head		va;
+	unsigned			id;
+
+	/* contains the page directory */
+	struct radeon_sa_bo		*page_directory;
+	uint64_t			pd_gpu_addr;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_sa_bo		**page_tables;
+
+	struct sx			mutex;
+	/* last fence for cs using this vm */
+	struct radeon_fence		*fence;
+	/* last flush or NULL if we still need to flush */
+	struct radeon_fence		*last_flush;
+};
+
+struct radeon_vm_manager {
+	struct sx			lock;
+	struct list_head		lru_vm;
+	struct radeon_fence		*active[RADEON_NUM_VM];
+	struct radeon_sa_manager	sa_manager;
+	uint32_t			max_pfn;
+	/* number of VMIDs */
+	unsigned			nvm;
+	/* vram base address for page table entry  */
+	u64				vram_base_offset;
+	/* is vm enabled? */
+	bool				enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+	struct radeon_vm		vm;
+};
+
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	atomic_t		lock;
+	bool                    enabled;
+};
+
+struct r600_blit_cp_primitives {
+	void (*set_render_target)(struct radeon_device *rdev, int format,
+				  int w, int h, u64 gpu_addr);
+	void (*cp_set_surface_sync)(struct radeon_device *rdev,
+				    u32 sync_type, u32 size,
+				    u64 mc_addr);
+	void (*set_shaders)(struct radeon_device *rdev);
+	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
+	void (*set_tex_resource)(struct radeon_device *rdev,
+				 int format, int w, int h, int pitch,
+				 u64 gpu_addr, u32 size);
+	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
+			     int x2, int y2);
+	void (*draw_auto)(struct radeon_device *rdev);
+	void (*set_default_state)(struct radeon_device *rdev);
+};
+
+struct r600_blit {
+	struct radeon_bo	*shader_obj;
+	struct r600_blit_cp_primitives primitives;
+	int max_dim;
+	int ring_size_common;
+	int ring_size_per_loop;
+	u64 shader_gpu_addr;
+	u32 vs_offset, ps_offset;
+	u32 state_offset;
+	u32 state_len;
+};
+
+/*
+ * SI RLC stuff
+ */
+struct si_rlc {
+	/* for power gating */
+	struct radeon_bo	*save_restore_obj;
+	uint64_t		save_restore_gpu_addr;
+	/* for clear state */
+	struct radeon_bo	*clear_state_obj;
+	uint64_t		clear_state_gpu_addr;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
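+
+/* Typical command emission sketch (illustrative only; PACKET0() and the
+ * radeon_ring_write() helper are defined elsewhere):
+ *
+ *	r = radeon_ring_lock(rdev, ring, 2);	// reserve 2 dwords
+ *	if (r == 0) {
+ *		radeon_ring_write(ring, PACKET0(reg, 0));
+ *		radeon_ring_write(ring, val);
+ *		radeon_ring_unlock_commit(rdev, ring);
+ *	}
+ */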
+
+
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
+/*
+ * CS.
+ */
+struct radeon_cs_reloc {
+	struct drm_gem_object		*gobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
+	uint32_t			handle;
+	uint32_t			flags;
+};
+
+struct radeon_cs_chunk {
+	uint32_t		chunk_id;
+	uint32_t		length_dw;
+	int			kpage_idx[2];
+	uint32_t		*kpage[2];
+	uint32_t		*kdata;
+	void __user		*user_ptr;
+	int			last_copied_page;
+	int			last_page_index;
+};
+
+struct radeon_cs_parser {
+	device_t		dev;
+	struct radeon_device	*rdev;
+	struct drm_file		*filp;
+	/* chunks */
+	unsigned		nchunks;
+	struct radeon_cs_chunk	*chunks;
+	uint64_t		*chunks_array;
+	/* IB */
+	unsigned		idx;
+	/* relocations */
+	unsigned		nrelocs;
+	struct radeon_cs_reloc	*relocs;
+	struct radeon_cs_reloc	**relocs_ptr;
+	struct list_head	validated;
+	unsigned		dma_reloc_idx;
+	/* indices of various chunks */
+	int			chunk_ib_idx;
+	int			chunk_relocs_idx;
+	int			chunk_flags_idx;
+	int			chunk_const_ib_idx;
+	struct radeon_ib	ib;
+	struct radeon_ib	const_ib;
+	void			*track;
+	unsigned		family;
+	int			parser_error;
+	u32			cs_flags;
+	u32			ring;
+	s32			priority;
+};
+
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+
+struct radeon_cs_packet {
+	unsigned	idx;
+	unsigned	type;
+	unsigned	reg;
+	unsigned	opcode;
+	int		count;
+	unsigned	one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt,
+				      unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+	struct radeon_bo	*wb_obj;
+	volatile uint32_t	*wb;
+	uint64_t		gpu_addr;
+	bool                    enabled;
+	bool                    use_event;
+};
+
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
+#define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
+#define R600_WB_EVENT_OFFSET     3072
+
+/**
+ * struct radeon_pm - power management datas
+ * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
+ * @igp_sideport_mclk:  sideport memory clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_system_mclk:    system clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_clk:    ht link clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
+ * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
+ * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
+ * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
+ * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
+ * @sclk:          	GPU clock Mhz (core bandwidth depends of this clock)
+ * @needed_bandwidth:   current bandwidth needs
+ *
+ * It keeps track of various data needed to take powermanagement decision.
+ * Bandwidth need is used to determine minimun clock of the GPU and memory.
+ * Equation between gpu/memory clock and available bandwidth is hw dependent
+ * (type of memory, bus size, efficiency, ...)
+ */
+
+enum radeon_pm_method {
+	PM_METHOD_PROFILE,
+	PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+	DYNPM_STATE_DISABLED,
+	DYNPM_STATE_MINIMUM,
+	DYNPM_STATE_PAUSED,
+	DYNPM_STATE_ACTIVE,
+	DYNPM_STATE_SUSPENDED,
+};
+enum radeon_dynpm_action {
+	DYNPM_ACTION_NONE,
+	DYNPM_ACTION_MINIMUM,
+	DYNPM_ACTION_DOWNCLOCK,
+	DYNPM_ACTION_UPCLOCK,
+	DYNPM_ACTION_DEFAULT
+};
+
+enum radeon_voltage_type {
+	VOLTAGE_NONE = 0,
+	VOLTAGE_GPIO,
+	VOLTAGE_VDDC,
+	VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+	POWER_STATE_TYPE_DEFAULT,
+	POWER_STATE_TYPE_POWERSAVE,
+	POWER_STATE_TYPE_BATTERY,
+	POWER_STATE_TYPE_BALANCED,
+	POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_profile_type {
+	PM_PROFILE_DEFAULT,
+	PM_PROFILE_AUTO,
+	PM_PROFILE_LOW,
+	PM_PROFILE_MID,
+	PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX  1
+#define PM_PROFILE_MID_SH_IDX  2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX  4
+#define PM_PROFILE_MID_MH_IDX  5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX         7
+
+struct radeon_pm_profile {
+	int dpms_off_ps_idx;
+	int dpms_on_ps_idx;
+	int dpms_off_cm_idx;
+	int dpms_on_cm_idx;
+};
+
+enum radeon_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+};
+
+struct radeon_voltage {
+	enum radeon_voltage_type type;
+	/* gpio voltage */
+	struct radeon_gpio_rec gpio;
+	u32 delay; /* delay in usec from voltage drop to sclk change */
+	bool active_high; /* voltage drop is active when bit is high */
+	/* VDDC voltage */
+	u8 vddc_id; /* index into vddc voltage table */
+	u8 vddci_id; /* index into vddci voltage table */
+	bool vddci_enabled;
+	/* r6xx+ sw */
+	u16 voltage;
+	/* evergreen+ vddci */
+	u16 vddci;
+};
+
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
+
+struct radeon_pm_clock_info {
+	/* memory clock */
+	u32 mclk;
+	/* engine clock */
+	u32 sclk;
+	/* voltage info */
+	struct radeon_voltage voltage;
+	/* standardized clock flags */
+	u32 flags;
+};
+
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
+struct radeon_power_state {
+	enum radeon_pm_state_type type;
+	struct radeon_pm_clock_info *clock_info;
+	/* number of valid clock modes in this power state */
+	int num_clock_modes;
+	struct radeon_pm_clock_info *default_clock_mode;
+	/* standardized state flags */
+	u32 flags;
+	u32 misc; /* vbios specific flags */
+	u32 misc2; /* vbios specific flags */
+	int pcie_lanes; /* pcie lanes */
+};
+
+/*
+ * Some modes are overclocked by a very small amount; accept them
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
+struct radeon_pm {
+	struct sx		mutex;
+	/* write locked while reprogramming mclk */
+	struct sx		mclk_lock;
+	u32			active_crtcs;
+	int			active_crtc_count;
+	int			req_vblank;
+	bool			vblank_sync;
+	fixed20_12		max_bandwidth;
+	fixed20_12		igp_sideport_mclk;
+	fixed20_12		igp_system_mclk;
+	fixed20_12		igp_ht_link_clk;
+	fixed20_12		igp_ht_link_width;
+	fixed20_12		k8_bandwidth;
+	fixed20_12		sideport_bandwidth;
+	fixed20_12		ht_bandwidth;
+	fixed20_12		core_bandwidth;
+	fixed20_12		sclk;
+	fixed20_12		mclk;
+	fixed20_12		needed_bandwidth;
+	struct radeon_power_state *power_state;
+	/* number of valid power states */
+	int                     num_power_states;
+	int                     current_power_state_index;
+	int                     current_clock_mode_index;
+	int                     requested_power_state_index;
+	int                     requested_clock_mode_index;
+	int                     default_power_state_index;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u16                     current_vddc;
+	u16                     current_vddci;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	u16                     default_vddc;
+	u16                     default_vddci;
+	struct radeon_i2c_chan *i2c_bus;
+	/* selected pm method */
+	enum radeon_pm_method     pm_method;
+	/* dynpm power management */
+#ifdef FREEBSD_WIP
+	struct delayed_work	dynpm_idle_work;
+#endif /* FREEBSD_WIP */
+	enum radeon_dynpm_state	dynpm_state;
+	enum radeon_dynpm_action	dynpm_planned_action;
+	unsigned long		dynpm_action_timeout;
+	bool                    dynpm_can_upclock;
+	bool                    dynpm_can_downclock;
+	/* profile-based power management */
+	enum radeon_pm_profile_type profile;
+	int                     profile_index;
+	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+	/* internal thermal controller on rv6xx+ */
+	enum radeon_int_thermal_type int_thermal_type;
+#ifdef FREEBSD_WIP
+	struct device	        *int_hwmon_dev;
+#endif /* FREEBSD_WIP */
+};
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance);
+
+struct r600_audio {
+	int			channels;
+	int			rate;
+	int			bits_per_sample;
+	u8			status_bits;
+	u8			category_code;
+};
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev, int test_number);
+
+
+/*
+ * Testing
+ */
+void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *cpA,
+			   struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
+
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
+
+
+/*
+ * ASIC specific functions.
+ */
+struct radeon_asic {
+	int (*init)(struct radeon_device *rdev);
+	void (*fini)(struct radeon_device *rdev);
+	int (*resume)(struct radeon_device *rdev);
+	int (*suspend)(struct radeon_device *rdev);
+	void (*vga_set_state)(struct radeon_device *rdev, bool state);
+	int (*asic_reset)(struct radeon_device *rdev);
+	/* ioctl hw specific callback. Some hw might want to perform a special
+	 * operation on a specific ioctl. For instance, on wait idle some hw
+	 * might want to perform an HDP flush through MMIO, as it seems that
+	 * some R6XX/R7XX hw doesn't take the HDP flush into account if it is
+	 * programmed through the ring.
+	 */
+	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+	/* check if 3D engine is idle */
+	bool (*gui_idle)(struct radeon_device *rdev);
+	/* wait for mc_idle */
+	int (*mc_wait_for_idle)(struct radeon_device *rdev);
+	/* gart */
+	struct {
+		void (*tlb_flush)(struct radeon_device *rdev);
+		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+	} gart;
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+
+		u32 pt_ring_index;
+		void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+				 uint64_t addr, unsigned count,
+				 uint32_t incr, uint32_t flags);
+	} vm;
+	/* ring specific callbacks */
+	struct {
+		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+				       struct radeon_semaphore *semaphore, bool emit_wait);
+		int (*cs_parse)(struct radeon_cs_parser *p);
+		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+	} ring[RADEON_NUM_RINGS];
+	/* irqs */
+	struct {
+		int (*set)(struct radeon_device *rdev);
+		irqreturn_t (*process)(struct radeon_device *rdev);
+	} irq;
+	/* displays */
+	struct {
+		/* display watermarks */
+		void (*bandwidth_update)(struct radeon_device *rdev);
+		/* get frame count */
+		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+		/* wait for vblank */
+		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+		/* set backlight level */
+		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+		/* get backlight level */
+		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+	} display;
+	/* copy functions for bo handling */
+	struct {
+		int (*blit)(struct radeon_device *rdev,
+			    uint64_t src_offset,
+			    uint64_t dst_offset,
+			    unsigned num_gpu_pages,
+			    struct radeon_fence **fence);
+		u32 blit_ring_index;
+		int (*dma)(struct radeon_device *rdev,
+			   uint64_t src_offset,
+			   uint64_t dst_offset,
+			   unsigned num_gpu_pages,
+			   struct radeon_fence **fence);
+		u32 dma_ring_index;
+		/* method used for bo copy */
+		int (*copy)(struct radeon_device *rdev,
+			    uint64_t src_offset,
+			    uint64_t dst_offset,
+			    unsigned num_gpu_pages,
+			    struct radeon_fence **fence);
+		/* ring used for bo copies */
+		u32 copy_ring_index;
+	} copy;
+	/* surfaces */
+	struct {
+		int (*set_reg)(struct radeon_device *rdev, int reg,
+				       uint32_t tiling_flags, uint32_t pitch,
+				       uint32_t offset, uint32_t obj_size);
+		void (*clear_reg)(struct radeon_device *rdev, int reg);
+	} surface;
+	/* hotplug detect */
+	struct {
+		void (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	} hpd;
+	/* power management */
+	struct {
+		void (*misc)(struct radeon_device *rdev);
+		void (*prepare)(struct radeon_device *rdev);
+		void (*finish)(struct radeon_device *rdev);
+		void (*init_profile)(struct radeon_device *rdev);
+		void (*get_dynpm_state)(struct radeon_device *rdev);
+		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+		int (*get_pcie_lanes)(struct radeon_device *rdev);
+		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+	} pm;
+	/* pageflipping */
+	struct {
+		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+	} pflip;
+};
+
+/*
+ * Asic structures
+ */
+struct r100_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			hdp_cntl;
+};
+
+struct r300_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			resync_scratch;
+	u32			hdp_cntl;
+};
+
+struct r600_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+};
+
+struct rv770_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		sx_num_of_sets;
+	unsigned		sc_prim_fifo_size;
+	unsigned		sc_hiz_tile_fifo_size;
+	unsigned		sc_earlyz_tile_fifo_fize;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+};
+
+struct evergreen_asic {
+	unsigned num_ses;
+	unsigned max_pipes;
+	unsigned max_tile_pipes;
+	unsigned max_simds;
+	unsigned max_backends;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_stack_entries;
+	unsigned max_hw_contexts;
+	unsigned max_gs_threads;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned sq_num_cf_insts;
+	unsigned sx_num_of_sets;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+	unsigned tiling_nbanks;
+	unsigned tiling_npipes;
+	unsigned tiling_group_size;
+	unsigned tile_config;
+	unsigned backend_map;
+};
+
+struct cayman_asic {
+	unsigned max_shader_engines;
+	unsigned max_pipes_per_simd;
+	unsigned max_tile_pipes;
+	unsigned max_simds_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_gs_threads;
+	unsigned max_stack_entries;
+	unsigned sx_num_of_sets;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned max_hw_contexts;
+	unsigned sq_num_cf_insts;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_shader_engines;
+	unsigned num_shader_pipes_per_simd;
+	unsigned num_tile_pipes;
+	unsigned num_simds_per_se;
+	unsigned num_backends_per_se;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+};
+
+struct si_asic {
+	unsigned max_shader_engines;
+	unsigned max_tile_pipes;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_gs_threads;
+	unsigned max_hw_contexts;
+	unsigned sc_prim_fifo_size_frontend;
+	unsigned sc_prim_fifo_size_backend;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_tile_pipes;
+	unsigned num_backends_per_se;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+};
+
+union radeon_asic_config {
+	struct r300_asic	r300;
+	struct r100_asic	r100;
+	struct r600_asic	r600;
+	struct rv770_asic	rv770;
+	struct evergreen_asic	evergreen;
+	struct cayman_asic	cayman;
+	struct si_asic		si;
+};
+
+/*
+ * ASIC initialization from radeon_asic.c
+ */
+int radeon_asic_init(struct radeon_device *rdev);
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
+	struct radeon_bo		*robj;
+	volatile uint32_t		*ptr;
+	u64				gpu_addr;
+};
+
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct radeon_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct radeon_atif {
+	struct radeon_atif_notifications notifications;
+	struct radeon_atif_functions functions;
+	struct radeon_atif_notification_cfg notification_cfg;
+	struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+	bool get_ext_state;
+	bool pcie_perf_req;
+	bool pcie_dev_rdy;
+	bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+	struct radeon_atcs_functions functions;
+};
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+	device_t			dev;
+	struct drm_device		*ddev;
+	struct sx			exclusive_lock;
+	/* ASIC */
+	union radeon_asic_config	config;
+	enum radeon_family		family;
+	unsigned long			flags;
+	int				usec_timeout;
+	enum radeon_pll_errata		pll_errata;
+	int				num_gb_pipes;
+	int				num_z_pipes;
+	int				disp_priority;
+	/* BIOS */
+	uint8_t				*bios;
+	bool				is_atom_bios;
+	uint16_t			bios_header_start;
+	struct radeon_bo		*stollen_vga_memory;
+	/* Register mmio */
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	struct mtx			mmio_idx_lock;
+	int				rmmio_rid;
+	struct resource			*rmmio;
+	radeon_rreg_t			mc_rreg;
+	radeon_wreg_t			mc_wreg;
+	radeon_rreg_t			pll_rreg;
+	radeon_wreg_t			pll_wreg;
+	uint32_t                        pcie_reg_mask;
+	radeon_rreg_t			pciep_rreg;
+	radeon_wreg_t			pciep_wreg;
+	/* io port */
+	int				rio_rid;
+	struct resource			*rio_mem;
+	resource_size_t			rio_mem_size;
+	struct radeon_clock             clock;
+	struct radeon_mc		mc;
+	struct radeon_gart		gart;
+	struct radeon_mode_info		mode_info;
+	struct radeon_scratch		scratch;
+	struct radeon_mman		mman;
+	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
+	struct cv			fence_queue;
+	struct mtx			fence_queue_mtx;
+	struct sx			ring_lock;
+	struct radeon_ring		ring[RADEON_NUM_RINGS];
+	bool				ib_pool_ready;
+	struct radeon_sa_manager	ring_tmp_bo;
+	struct radeon_irq		irq;
+	struct radeon_asic		*asic;
+	struct radeon_gem		gem;
+	struct radeon_pm		pm;
+	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
+	struct radeon_wb		wb;
+	struct radeon_dummy_page	dummy_page;
+	bool				shutdown;
+	bool				suspend;
+	bool				need_dma32;
+	bool				accel_working;
+	bool				fictitious_range_registered;
+	bool				fictitious_agp_range_registered;
+	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
+	const struct firmware *me_fw;	/* all family ME firmware */
+	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+	const struct firmware *mc_fw;	/* NI MC firmware */
+	const struct firmware *ce_fw;	/* SI CE firmware */
+	struct r600_blit r600_blit;
+	struct r600_vram_scratch vram_scratch;
+	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct si_rlc rlc;
+	struct taskqueue *tq;
+	struct task hotplug_work;
+	struct task audio_work;
+	int num_crtc; /* number of crtcs */
+	struct sx dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+	bool audio_enabled;
+	struct r600_audio audio_status; /* audio stuff */
+#if defined(CONFIG_ACPI)
+	struct {
+		ACPI_HANDLE		handle;
+		ACPI_NOTIFY_HANDLER	notifier_call;
+	} acpi;
+#endif
+	/* only one userspace can use Hyperz features or CMASK at a time */
+	struct drm_file *hyperz_filp;
+	struct drm_file *cmask_filp;
+	/* i2c buses */
+	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+	/* debugfs */
+	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+	unsigned 		debugfs_count;
+	/* virtual memory */
+	struct radeon_vm_manager	vm_manager;
+	struct sx			gpu_clock_mutex;
+	/* ACPI interface */
+	struct radeon_atif		atif;
+	struct radeon_atcs		atcs;
+};
+
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect);
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
+
+/*
+ * Register read & write functions.
+ */
+#define RREG8(reg) bus_read_1((rdev->rmmio), (reg))
+#define WREG8(reg, v) bus_write_1((rdev->rmmio), (reg), v)
+#define RREG16(reg) bus_read_2((rdev->rmmio), (reg))
+#define WREG16(reg, v) bus_write_2((rdev->rmmio), (reg), v)
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) DRM_INFO("REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define WREG32_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32(reg);			\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32(reg, tmp_);				\
+	} while (0)
+#define WREG32_PLL_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_PLL(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_PLL(reg, tmp_);				\
+	} while (0)
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
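+
+/*
+ * Usage sketch (illustrative only; SOME_REG/SOME_FIELD below are
+ * placeholders, not definitions from this file).  WREG32_P() keeps the
+ * bits set in 'mask' from the current register contents and takes the
+ * remaining bits from 'val', so replacing a single field reads:
+ *
+ *	WREG32_P(SOME_REG, REG_SET(SOME_FIELD, x), ~SOME_FIELD_MASK);
+ *
+ * everything outside SOME_FIELD is preserved and the field itself is
+ * overwritten with x.
+ */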
+
+/*
+ * Indirect register accessors
+ */
+static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	r = RREG32(RADEON_PCIE_DATA);
+	return r;
+}
+
+static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	WREG32(RADEON_PCIE_DATA, (v));
+}
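+
+/*
+ * The two helpers above implement the usual index/data indirection: the
+ * target offset is written to RADEON_PCIE_INDEX and the payload then moves
+ * through RADEON_PCIE_DATA.  The RREG32_PCIE()/WREG32_PCIE() macros defined
+ * earlier expand to these functions at each call site, which is why the
+ * macros can precede the definitions.  Illustrative use (register name from
+ * elsewhere in the driver):
+ *
+ *	link = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ *
+ * Nothing here serializes the index/data pair, so racing callers must
+ * provide their own locking.
+ */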
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASIC helpers.
+ */
+#define ASIC_IS_RN50(rdev) ((rdev->ddev->pci_device == 0x515e) || \
+			    (rdev->ddev->pci_device == 0x5969))
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+		(rdev->family == CHIP_RV200) || \
+		(rdev->family == CHIP_RS100) || \
+		(rdev->family == CHIP_RS200) || \
+		(rdev->family == CHIP_RV250) || \
+		(rdev->family == CHIP_RV280) || \
+		(rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
+		(rdev->family == CHIP_RV350) ||			\
+		(rdev->family == CHIP_R350)  ||			\
+		(rdev->family == CHIP_RV380) ||			\
+		(rdev->family == CHIP_R420)  ||			\
+		(rdev->family == CHIP_R423)  ||			\
+		(rdev->family == CHIP_RV410) ||			\
+		(rdev->family == CHIP_RS400) ||			\
+		(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pci_device == 0x9441) || \
+		(rdev->ddev->pci_device == 0x9443) || \
+		(rdev->ddev->pci_device == 0x944B) || \
+		(rdev->ddev->pci_device == 0x9506) || \
+		(rdev->ddev->pci_device == 0x9509) || \
+		(rdev->ddev->pci_device == 0x950F) || \
+		(rdev->ddev->pci_device == 0x689C) || \
+		(rdev->ddev->pci_device == 0x689D))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
+			    (rdev->family == CHIP_RS690)  ||	\
+			    (rdev->family == CHIP_RS740)  ||	\
+			    (rdev->family >= CHIP_R600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
+#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
+#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
+			     (rdev->flags & RADEON_IS_IGP))
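+
+/*
+ * Example (illustrative): callers branch on display block generation rather
+ * than on individual chip ids, and because most of these macros test
+ * "family >= X" they overlap and are checked newest-first:
+ *
+ *	if (ASIC_IS_DCE4(rdev))
+ *		use the DCE4+ register layout
+ *	else if (ASIC_IS_AVIVO(rdev))
+ *		fall back to the AVIVO layout
+ */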
+
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
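+
+/*
+ * Example (sketch): the RBIOS* helpers assemble little-endian values byte
+ * by byte from the raw BIOS image, independent of host endianness.  Reading
+ * the 16-bit ROM header pointer that conventionally sits at offset 0x48 of
+ * a VBIOS image would be:
+ *
+ *	uint16_t hdr = RBIOS16(0x48);
+ *
+ * (the 0x48 offset is shown only as an example; rdev->bios must point at a
+ * valid copy of the BIOS before any of these are used.)
+ */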
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+#if DRM_DEBUG_CODE == 0
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
+#else
+/* With debugging this is just too big to inline */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+#endif
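+
+/*
+ * Typical (illustrative) submission sequence around the helper above --
+ * space is reserved first, words are written, then the write pointer is
+ * committed.  radeon_ring_lock(), radeon_ring_unlock_commit() and PACKET0()
+ * are the names used elsewhere in this driver; this is a sketch, not a
+ * complete submission path:
+ *
+ *	r = radeon_ring_lock(rdev, ring, ndw);
+ *	if (r)
+ *		return r;
+ *	radeon_ring_write(ring, PACKET0(reg, 0));
+ *	radeon_ring_write(ring, value);
+ *	radeon_ring_unlock_commit(rdev, ring);
+ */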
+
+/*
+ * ASIC macros.
+ */
+#define radeon_init(rdev) (rdev)->asic->init((rdev))
+#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
+#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
+#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
+#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
+#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
+#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
+#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
+#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
+#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
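+
+/*
+ * The macros above are thin wrappers over a per-family function-pointer
+ * table: each generation fills in rdev->asic at init time and generic code
+ * dispatches through these names with no chip-specific branches.  A sketch
+ * of a call site (error text illustrative):
+ *
+ *	if (radeon_asic_reset(rdev))
+ *		DRM_ERROR("GPU reset failed\n");
+ */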
+
+/* Common functions */
+/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_agp_disable(struct radeon_device *rdev);
+extern int radeon_modeset_init(struct radeon_device *rdev);
+extern void radeon_modeset_fini(struct radeon_device *rdev);
+extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
+extern void radeon_surface_init(struct radeon_device *rdev);
+extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+
+/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring);
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t offset,
+			  uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_bo_va *bo_va);
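+
+/*
+ * Typical (illustrative) per-open VM lifecycle built from the functions
+ * above: radeon_vm_init() when a file is opened, radeon_vm_bo_add() plus
+ * radeon_vm_bo_set_addr() as buffers are mapped, radeon_vm_bo_rmv() on
+ * unmap, and radeon_vm_fini() at close.  A sketch of the call order only,
+ * not a complete code path.
+ */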
+
+/* audio */
+void r600_audio_update_hdmi(void *arg, int pending);
+
+/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
+ * r600 cs checking helpers
+ */
+unsigned r600_mip_minify(unsigned size, unsigned level);
+bool r600_fmt_is_valid_color(u32 format);
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
+int r600_fmt_get_blocksize(u32 format);
+int r600_fmt_get_nblocksx(u32 format, u32 w);
+int r600_fmt_get_nblocksy(u32 format, u32 h);
+
+/*
+ * r600 functions used by radeon_encoder.c
+ */
+struct radeon_hdmi_acr {
+	u32 clock;
+
+	int n_32khz;
+	int cts_32khz;
+
+	int n_44_1khz;
+	int cts_44_1khz;
+
+	int n_48khz;
+	int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
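+
+/*
+ * Background (hedged): HDMI audio clock regeneration relates the audio
+ * sample rate to the TMDS clock through the (N, CTS) pair, per the HDMI
+ * specification:
+ *
+ *	128 * audio_rate = tmds_clock * N / CTS
+ *
+ * so r600_hdmi_acr() returns one precomputed N/CTS pair per supported
+ * sample rate (32 kHz, 44.1 kHz, 48 kHz) for the given pixel clock.
+ */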
+
+extern void r600_hdmi_enable(struct drm_encoder *encoder);
+extern void r600_hdmi_disable(struct drm_encoder *encoder);
+extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+				     u32 tiling_pipe_num,
+				     u32 max_rb_num,
+				     u32 total_max_rb_num,
+				     u32 enabled_rb_mask);
+
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+extern void ni_fini_microcode(struct radeon_device *rdev);
+
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
+
+/* Prototypes added by @dumbbell. */
+
+/* atombios_encoders.c */
+void	radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+	    struct drm_connector *drm_connector);
+void	radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+	    uint32_t supported_device, u16 caps);
+
+/* radeon_atombios.c */
+bool	radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+	    struct drm_display_mode *mode);
+
+/* radeon_combios.c */
+void	radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+	    struct drm_encoder *encoder, bool connected);
+
+/* radeon_connectors.c */
+void	radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+	    struct drm_encoder *encoder, bool connected);
+void	radeon_add_legacy_connector(struct drm_device *dev,
+	    uint32_t connector_id,
+	    uint32_t supported_device,
+	    int connector_type,
+	    struct radeon_i2c_bus_rec *i2c_bus,
+	    uint16_t connector_object_id,
+	    struct radeon_hpd *hpd);
+void	radeon_add_atom_connector(struct drm_device *dev,
+	    uint32_t connector_id,
+	    uint32_t supported_device,
+	    int connector_type,
+	    struct radeon_i2c_bus_rec *i2c_bus,
+	    uint32_t igp_lane_info,
+	    uint16_t connector_object_id,
+	    struct radeon_hpd *hpd,
+	    struct radeon_router *router);
+
+/* radeon_encoders.c */
+uint32_t	radeon_get_encoder_enum(struct drm_device *dev,
+		    uint32_t supported_device, uint8_t dac);
+void		radeon_link_encoder_connector(struct drm_device *dev);
+
+/* radeon_legacy_encoders.c */
+void	radeon_add_legacy_encoder(struct drm_device *dev,
+	    uint32_t encoder_enum, uint32_t supported_device);
+void	radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+	    struct drm_connector *drm_connector);
+
+/* radeon_pm.c */
+void	radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+/* radeon_ttm.c */
+int	radeon_ttm_init(struct radeon_device *rdev);
+void	radeon_ttm_fini(struct radeon_device *rdev);
+
+/* radeon_fb.c */
+struct fb_info *	radeon_fb_helper_getinfo(device_t kdev);
+
+/* r600.c */
+int r600_ih_ring_alloc(struct radeon_device *rdev);
+void r600_ih_ring_fini(struct radeon_device *rdev);
+
+#include "radeon_object.h"
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_acpi.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_acpi.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_acpi.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,640 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_acpi.c 314593 2017-03-03 12:03:50Z avg $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
+
+#define ACPI_AC_CLASS           "ac_adapter"
+
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_mask;		/* valid flags mask */
+	u32 flags;		/* flags */
+	u8 command_code;	/* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 pending;		/* pending sbios requests */
+	u8 panel_exp_mode;	/* panel expansion mode */
+	u8 thermal_gfx;		/* thermal state: target gfx controller */
+	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
+	u8 forced_power_gfx;	/* forced power state: target gfx controller */
+	u8 forced_power_state;	/* forced power state: state id */
+	u8 system_power_src;	/* system power source */
+	u8 backlight_level;	/* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK	0x3
+#define ATIF_NOTIFY_NONE	0
+#define ATIF_NOTIFY_81		1
+#define ATIF_NOTIFY_N		2
+
+struct atcs_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+/**
+ * radeon_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static ACPI_OBJECT *radeon_atif_call(ACPI_HANDLE handle, int function,
+		ACPI_BUFFER *params)
+{
+	ACPI_STATUS status;
+	ACPI_OBJECT atif_arg_elements[2];
+	ACPI_OBJECT_LIST atif_arg;
+	ACPI_BUFFER buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atif_arg.Count = 2;
+	atif_arg.Pointer = &atif_arg_elements[0];
+
+	atif_arg_elements[0].Type = ACPI_TYPE_INTEGER;
+	atif_arg_elements[0].Integer.Value = function;
+
+	if (params) {
+		atif_arg_elements[1].Type = ACPI_TYPE_BUFFER;
+		atif_arg_elements[1].Buffer.Length = params->Length;
+		atif_arg_elements[1].Buffer.Pointer = params->Pointer;
+	} else {
+		/* We need a second fake parameter */
+		atif_arg_elements[1].Type = ACPI_TYPE_INTEGER;
+		atif_arg_elements[1].Integer.Value = 0;
+	}
+
+	status = AcpiEvaluateObject(handle, "ATIF", &atif_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATIF is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+				 AcpiFormatException(status));
+		AcpiOsFree(buffer.Pointer);
+		return NULL;
+	}
+
+	return buffer.Pointer;
+}
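+
+/*
+ * Illustrative caller pattern for the helper above (it mirrors the uses
+ * later in this file): functions with no input pass a NULL params buffer
+ * and the helper fabricates the dummy second argument itself.
+ *
+ *	info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ *	if (!info)
+ *		return -EIO;
+ *	(parse info->Buffer, then...)
+ *	AcpiOsFree(info);
+ *
+ * The caller owns the ACPI_ALLOCATE_BUFFER output and must free it.
+ */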
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(ACPI_HANDLE handle,
+		struct radeon_atif *atif)
+{
+	ACPI_OBJECT *info;
+	struct atif_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->Buffer.Pointer;
+	if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->Buffer.Pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+	radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+	radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+	AcpiOsFree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics).  This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(ACPI_HANDLE handle,
+		struct radeon_atif_notification_cfg *n)
+{
+	ACPI_OBJECT *info;
+	struct atif_system_params params;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->Buffer.Pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&params, 0, sizeof(params));
+	size = min(sizeof(params), size);
+	memcpy(&params, info->Buffer.Pointer, size);
+
+	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+	params.flags = params.flags & params.valid_mask;
+
+	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+		n->enabled = false;
+		n->command_code = 0;
+	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+		n->enabled = true;
+		n->command_code = 0x81;
+	} else {
+		if (size < 11) {
+			err = -EINVAL;
+			goto out;
+		}
+		n->enabled = true;
+		n->command_code = params.command_code;
+	}
+
+out:
+	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+			(n->enabled ? "enabled" : "disabled"),
+			n->command_code);
+	AcpiOsFree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_sbios_requests(ACPI_HANDLE handle,
+		struct atif_sbios_requests *req)
+{
+	ACPI_OBJECT *info;
+	size_t size;
+	int count = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	if (!info)
+		return -EIO;
+
+	size = *(u16 *)info->Buffer.Pointer;
+	if (size < 0xd) {
+		count = -EINVAL;
+		goto out;
+	}
+	memset(req, 0, sizeof(*req));
+
+	size = min(sizeof(*req), size);
+	memcpy(req, info->Buffer.Pointer, size);
+	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+	count = hweight32(req->pending);
+
+out:
+	AcpiOsFree(info);
+	return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @type: ACPI notify event type
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ */
+void radeon_atif_handler(struct radeon_device *rdev,
+    UINT32 type)
+{
+	struct radeon_atif *atif = &rdev->atif;
+	struct atif_sbios_requests req;
+	ACPI_HANDLE handle;
+	int count;
+
+	DRM_DEBUG_DRIVER("event, type = %#x\n",
+			type);
+
+	if (!atif->notification_cfg.enabled ||
+			type != atif->notification_cfg.command_code)
+		/* Not our event */
+		return;
+
+	/* Check pending SBIOS requests */
+	handle = rdev->acpi.handle;
+	count = radeon_atif_get_sbios_requests(handle, &req);
+
+	if (count <= 0)
+		return;
+
+	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+		struct radeon_encoder *enc = atif->encoder_for_bl;
+
+		if (enc) {
+			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+					req.backlight_level);
+
+			radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+#ifdef FREEBSD_WIP
+			if (rdev->is_atom_bios) {
+				struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			} else {
+				struct radeon_encoder_lvds *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			}
+#endif /* FREEBSD_WIP */
+		}
+	}
+	/* TODO: check other events */
+
+	/* We've handled the event, stop the notifier chain. The ACPI interface
+	 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
+	 * userspace if the event was generated only to signal an SBIOS
+	 * request.
+	 */
+}
+
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static ACPI_OBJECT *radeon_atcs_call(ACPI_HANDLE handle, int function,
+				     ACPI_BUFFER *params)
+{
+	ACPI_STATUS status;
+	ACPI_OBJECT atcs_arg_elements[2];
+	ACPI_OBJECT_LIST atcs_arg;
+	ACPI_BUFFER buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atcs_arg.Count = 2;
+	atcs_arg.Pointer = &atcs_arg_elements[0];
+
+	atcs_arg_elements[0].Type = ACPI_TYPE_INTEGER;
+	atcs_arg_elements[0].Integer.Value = function;
+
+	if (params) {
+		atcs_arg_elements[1].Type = ACPI_TYPE_BUFFER;
+		atcs_arg_elements[1].Buffer.Length = params->Length;
+		atcs_arg_elements[1].Buffer.Pointer = params->Pointer;
+	} else {
+		/* We need a second fake parameter */
+		atcs_arg_elements[1].Type = ACPI_TYPE_INTEGER;
+		atcs_arg_elements[1].Integer.Value = 0;
+	}
+
+	status = AcpiEvaluateObject(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+				 AcpiFormatException(status));
+		AcpiOsFree(buffer.Pointer);
+		return NULL;
+	}
+
+	return buffer.Pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(ACPI_HANDLE handle,
+					struct radeon_atcs *atcs)
+{
+	ACPI_OBJECT *info;
+	struct atcs_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->Buffer.Pointer;
+	if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->Buffer.Pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+	radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+	AcpiOsFree(info);
+	return err;
+}
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @handle: acpi handle for the notifying device
+ * @type: ACPI notify event type
+ * @context: radeon_device pointer
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ */
+static void radeon_acpi_event(ACPI_HANDLE handle, UINT32 type,
+    void *context)
+{
+	struct radeon_device *rdev = (struct radeon_device *)context;
+
+#ifdef FREEBSD_WIP
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG_DRIVER("pm: AC\n");
+		else
+			DRM_DEBUG_DRIVER("pm: DC\n");
+
+		radeon_pm_acpi_event_handler(rdev);
+	}
+#endif /* FREEBSD_WIP */
+
+	/* Check for pending SBIOS requests */
+	radeon_atif_handler(rdev, type);
+}
+
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+	ACPI_HANDLE handle;
+	struct radeon_atif *atif = &rdev->atif;
+	struct radeon_atcs *atcs = &rdev->atcs;
+	int ret;
+
+	/* Get the device handle */
+	handle = acpi_get_handle(rdev->dev);
+
+	/* No need to proceed if we're sure that ATIF is not supported */
+	if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+		return 0;
+
+	/* Call the ATCS method */
+	ret = radeon_atcs_verify_interface(handle, atcs);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+	}
+
+	/* Call the ATIF method */
+	ret = radeon_atif_verify_interface(handle, atif);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		goto out;
+	}
+
+	if (atif->notifications.brightness_change) {
+		struct drm_encoder *tmp;
+		struct radeon_encoder *target = NULL;
+
+		/* Find the encoder controlling the brightness */
+		list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+				head) {
+			struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+			    enc->enc_priv) {
+				if (rdev->is_atom_bios) {
+					struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				} else {
+					struct radeon_encoder_lvds *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				}
+			}
+		}
+
+		atif->encoder_for_bl = target;
+		if (!target) {
+			/* Brightness change notification is enabled, but we
+			 * didn't find a backlight controller, this should
+			 * never happen.
+			 */
+			DRM_ERROR("Cannot find a backlight controller\n");
+		}
+	}
+
+	if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround: if the sbios request function
+		 * is present, we have to see how it's configured in the
+		 * system params
+		 */
+		atif->functions.system_params = true;
+	}
+
+	if (atif->functions.system_params) {
+		ret = radeon_atif_get_notification_params(handle,
+				&atif->notification_cfg);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+					ret);
+			/* Disable notification */
+			atif->notification_cfg.enabled = false;
+		}
+	}
+
+out:
+	rdev->acpi.handle = handle;
+	rdev->acpi.notifier_call = radeon_acpi_event;
+	AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY,
+	    rdev->acpi.notifier_call, rdev);
+
+	return ret;
+}
+
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+	AcpiRemoveNotifyHandler(rdev->acpi.handle, ACPI_DEVICE_NOTIFY,
+	    rdev->acpi.notifier_call);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_acpi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_acpi.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_acpi.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_acpi.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,447 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_acpi.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+
+void radeon_atif_handler(struct radeon_device *rdev, UINT32 type);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
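+/* Illustrative decode of the notify configuration above (the ATIF_NOTIFY_*
+ * constants that mirror these bits live in radeon_acpi.c, not here):
+ *
+ *	(flags & 0x3) == 0  ->  no notifier is used
+ *	(flags & 0x3) == 1  ->  Notify(VGA, 0x81)
+ *	(flags & 0x3) == 2  ->  Notify(VGA, n), n from the notify command code
+ */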
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume, for general lid
+ * status, use the kernel provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number     } repeated structure
+ * WORD  - device number /
+ */
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED   (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                     \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                         /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                            } repeated structure
+ * BYTE  - ddc id                           /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+#       define ATPX_HPD_NONE                                       0
+#       define ATPX_HPD1                                           1
+#       define ATPX_HPD2                                           2
+#       define ATPX_HPD3                                           3
+#       define ATPX_HPD4                                           4
+#       define ATPX_HPD5                                           5
+#       define ATPX_HPD6                                           6
+/* ddc id */
+#       define ATPX_DDC_NONE                                       0
+#       define ATPX_DDC1                                           1
+#       define ATPX_DDC2                                           2
+#       define ATPX_DDC3                                           3
+#       define ATPX_DDC4                                           4
+#       define ATPX_DDC5                                           5
+#       define ATPX_DDC6                                           6
+#       define ATPX_DDC7                                           7
+#       define ATPX_DDC8                                           8
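+/* Illustrative sketch (editor's note): each repeated connector record in
+ * the GET_DISPLAY_CONNECTORS_MAPPING output above maps onto a packed
+ * structure such as the following; struct and field names are hypothetical.
+ *
+ *	struct atpx_connector_map_entry {
+ *		uint8_t  flags;			// ATPX_DISPLAY_*_SUPPORTED_...
+ *		uint8_t  atif_vector_bit;	// ATIF display vector bit position
+ *		uint8_t  adapter_id;		// 0: iGPU, 1-n: dGPU by pcie bus
+ *		uint16_t connector_acpi_id;
+ *	} __packed;
+ */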
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED                   (1 << 0)
+#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
+#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
+#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+#       define ATCS_DOCKED                                         (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST                     0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bits 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD  - valid flags mask
+ * WORD  - flags
+ * BYTE  - request type
+ * BYTE  - performance request
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - return value
+ */
+/* flags */
+#       define ATCS_ADVERTISE_CAPS                                 (1 << 0)
+#       define ATCS_WAIT_FOR_COMPLETION                            (1 << 1)
+/* request type */
+#       define ATCS_PCIE_LINK_SPEED                                1
+/* performance request */
+#       define ATCS_REMOVE                                         0
+#       define ATCS_FORCE_LOW_POWER                                1
+#       define ATCS_PERF_LEVEL_1                                   2 /* PCIE Gen 1 */
+#       define ATCS_PERF_LEVEL_2                                   3 /* PCIE Gen 2 */
+#       define ATCS_PERF_LEVEL_3                                   4 /* PCIE Gen 3 */
+/* return value */
+#       define ATCS_REQUEST_REFUSED                                1
+#       define ATCS_REQUEST_COMPLETE                               2
+#       define ATCS_REQUEST_IN_PROGRESS                            3
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION               0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH                           0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bits 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - number of active lanes
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - number of active lanes
+ */
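+/* Illustrative sketch (editor's note): packing ARG1 for the PCIE
+ * performance request described above.  Struct and field names are
+ * hypothetical; WORD/BYTE fields are little-endian per ACPI convention.
+ *
+ *	struct atcs_pcie_perf_req {
+ *		uint16_t size;		// includes this field
+ *		uint16_t client_id;	// bits 2-0 func, 7-3 dev, 15-8 bus
+ *		uint16_t valid_flags;
+ *		uint16_t flags;		// ATCS_ADVERTISE_CAPS, ...
+ *		uint8_t  request_type;	// ATCS_PCIE_LINK_SPEED
+ *		uint8_t  perf_request;	// e.g. ATCS_PERF_LEVEL_2
+ *	} __packed;
+ *
+ *	req.client_id = (bus << 8) | (dev << 3) | func;
+ */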
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_acpi.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_agp.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_agp.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_agp.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,290 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Jerome Glisse <glisse at freedesktop.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_agp.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+
+#if __OS_HAS_AGP
+
+struct radeon_agpmode_quirk {
+	u32 hostbridge_vendor;
+	u32 hostbridge_device;
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 default_mode;
+};
+
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+	/* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+	{ PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_IBM, 0x052f, 1},
+	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+		PCI_VENDOR_ID_IBM, 0x0530, 1},
+	/* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+		PCI_VENDOR_ID_IBM, 0x054f, 2},
+	/* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x816b, 2},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8195, 8},
+	/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+	{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_DELL, 0x00e3, 2},
+	/* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_DELL, 0x0149, 1},
+	/* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x0531, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0061, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0064, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x10cf, 0x127f, 1},
+	/* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+	{ 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1787, 0x5960, 4},
+	/* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+	{ PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+		0x17af, 0x2020, 4},
+	/* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+	{ PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+		PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+	/* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+	{ PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+		PCI_VENDOR_ID_ATI, 0x013a, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+	/* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+		0x174b, 0x7149, 4},
+	/* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1462, 0x0380, 4},
+	/* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+	{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8175, 1},
+	/* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+	{ PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+		PCI_VENDOR_ID_ATI, 0x0152, 2},
+	{ 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	uint32_t agp_status;
+	int default_mode;
+	bool is_v3;
+	int ret;
+
+	/* Acquire AGP. */
+	ret = drm_agp_acquire(rdev->ddev);
+	if (ret) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_agp_info(rdev->ddev, &info);
+	if (ret) {
+		drm_agp_release(rdev->ddev);
+		DRM_ERROR("Unable to get AGP info: %d\n", ret);
+		return ret;
+	}
+
+	if ((rdev->ddev->agp->agp_info.ai_aperture_size >> 20) < 32) {
+		drm_agp_release(rdev->ddev);
+		dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+			"need at least 32M, disabling AGP\n",
+			rdev->ddev->agp->agp_info.ai_aperture_size >> 20);
+		return -EINVAL;
+	}
+
+	mode.mode = info.mode;
+	/* Chips with the AGP-to-PCIE bridge don't have the AGP_STATUS
+	 * register; just use whatever mode the host sets up.
+	 */
+	if (rdev->family <= CHIP_RV350)
+		agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	else
+		agp_status = mode.mode;
+	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+	if (is_v3) {
+		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+	} else {
+		if (agp_status & RADEON_AGP_4X_MODE) {
+			default_mode = 4;
+		} else if (agp_status & RADEON_AGP_2X_MODE) {
+			default_mode = 2;
+		} else {
+			default_mode = 1;
+		}
+	}
+
+	/* Apply AGPMode Quirks */
+	while (p && p->chip_device != 0) {
+		if (info.id_vendor == p->hostbridge_vendor &&
+		    info.id_device == p->hostbridge_device &&
+		    rdev->ddev->pci_vendor == p->chip_vendor &&
+		    rdev->ddev->pci_device == p->chip_device &&
+		    rdev->ddev->pci_subvendor == p->subsys_vendor &&
+		    rdev->ddev->pci_subdevice == p->subsys_device) {
+			default_mode = p->default_mode;
+		}
+		++p;
+	}
+
+	if (radeon_agpmode > 0) {
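+		/* valid user-requested modes are powers of two within the
+		 * supported range (v3: 4 or 8, v2: 1, 2 or 4); the
+		 * (x & (x - 1)) test rejects non-powers-of-two */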
+		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+		    (radeon_agpmode & (radeon_agpmode - 1))) {
+			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+				  default_mode);
+			radeon_agpmode = default_mode;
+		} else {
+			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+		}
+	} else {
+		radeon_agpmode = default_mode;
+	}
+
+	mode.mode &= ~RADEON_AGP_MODE_MASK;
+	if (is_v3) {
+		switch (radeon_agpmode) {
+		case 8:
+			mode.mode |= RADEON_AGPv3_8X_MODE;
+			break;
+		case 4:
+		default:
+			mode.mode |= RADEON_AGPv3_4X_MODE;
+			break;
+		}
+	} else {
+		switch (radeon_agpmode) {
+		case 4:
+			mode.mode |= RADEON_AGP_4X_MODE;
+			break;
+		case 2:
+			mode.mode |= RADEON_AGP_2X_MODE;
+			break;
+		case 1:
+		default:
+			mode.mode |= RADEON_AGP_1X_MODE;
+			break;
+		}
+	}
+
+	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable AGP fast writes */
+	ret = drm_agp_enable(rdev->ddev, mode);
+	if (ret) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		drm_agp_release(rdev->ddev);
+		return ret;
+	}
+
+	rdev->mc.agp_base = rdev->ddev->agp->agp_info.ai_aperture_base;
+	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.ai_aperture_size;
+	rdev->mc.gtt_start = rdev->mc.agp_base;
+	rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %juM 0x%08jX - 0x%08jX\n",
+		(uintmax_t)rdev->mc.gtt_size >> 20, (uintmax_t)rdev->mc.gtt_start, (uintmax_t)rdev->mc.gtt_end);
+
+	/* workaround some hw issues */
+	if (rdev->family < CHIP_R200) {
+		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+	}
+	return 0;
+#else
+	return 0;
+#endif
+}
+
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	int r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+	}
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+		drm_agp_release(rdev->ddev);
+	}
+#endif
+}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_agp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_asic.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_asic.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_asic.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1962 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_asic.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	panic("Invalid callback to read register 0x%04X\n", reg);
+	return 0;
+}
+
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	panic("Invalid callback to write register 0x%04X with 0x%08X\n",
+		  reg, v);
+}
+
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures.  Not all asics have all apertures (all asics).
+ */
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+	rdev->mc_rreg = &radeon_invalid_rreg;
+	rdev->mc_wreg = &radeon_invalid_wreg;
+	rdev->pll_rreg = &radeon_invalid_rreg;
+	rdev->pll_wreg = &radeon_invalid_wreg;
+	rdev->pciep_rreg = &radeon_invalid_rreg;
+	rdev->pciep_wreg = &radeon_invalid_wreg;
+
+	/* Don't change order as we are overriding accessors. */
+	if (rdev->family < CHIP_RV515) {
+		rdev->pcie_reg_mask = 0xff;
+	} else {
+		rdev->pcie_reg_mask = 0x7ff;
+	}
+	/* FIXME: not sure here */
+	if (rdev->family <= CHIP_R580) {
+		rdev->pll_rreg = &r100_pll_rreg;
+		rdev->pll_wreg = &r100_pll_wreg;
+	}
+	if (rdev->family >= CHIP_R420) {
+		rdev->mc_rreg = &r420_mc_rreg;
+		rdev->mc_wreg = &r420_mc_wreg;
+	}
+	if (rdev->family >= CHIP_RV515) {
+		rdev->mc_rreg = &rv515_mc_rreg;
+		rdev->mc_wreg = &rv515_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+		rdev->mc_rreg = &rs400_mc_rreg;
+		rdev->mc_wreg = &rs400_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		rdev->mc_rreg = &rs690_mc_rreg;
+		rdev->mc_wreg = &rs690_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS600) {
+		rdev->mc_rreg = &rs600_mc_rreg;
+		rdev->mc_wreg = &rs600_mc_wreg;
+	}
+	if (rdev->family >= CHIP_R600) {
+		rdev->pciep_rreg = &r600_pciep_rreg;
+		rdev->pciep_wreg = &r600_pciep_wreg;
+	}
+}
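+
+/* Editor's note: radeon.h wraps these callbacks in accessor macros so
+ * per-family code never touches the apertures directly.  A minimal
+ * sketch, assuming the conventional macro names:
+ *
+ *	#define RREG32_MC(reg)		rdev->mc_rreg(rdev, (reg))
+ *	#define WREG32_MC(reg, v)	rdev->mc_wreg(rdev, (reg), (v))
+ */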
+
+
+/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+	rdev->flags &= ~RADEON_IS_AGP;
+	if (rdev->family >= CHIP_R600) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+	} else if (rdev->family >= CHIP_RV515 ||
+			rdev->family == CHIP_RV380 ||
+			rdev->family == CHIP_RV410 ||
+			rdev->family == CHIP_R423) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	} else {
+		DRM_INFO("Forcing AGP to PCI mode\n");
+		rdev->flags |= RADEON_IS_PCI;
+		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	}
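+	/* radeon_gart_size is specified in megabytes */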
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r100_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r100_cs_parse,
+			.ring_start = &r100_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = NULL,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r200_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r100_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r100_cs_parse,
+			.ring_start = &r100_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r300_asic = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r300_asic_pcie = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r420_asic = {
+	.init = &r420_init,
+	.fini = &r420_fini,
+	.suspend = &r420_suspend,
+	.resume = &r420_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs400_asic = {
+	.init = &rs400_init,
+	.fini = &rs400_fini,
+	.suspend = &rs400_suspend,
+	.resume = &rs400_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs600_asic = {
+	.init = &rs600_init,
+	.fini = &rs600_fini,
+	.suspend = &rs600_suspend,
+	.resume = &rs600_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs600_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs600_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs690_asic = {
+	.init = &rs690_init,
+	.fini = &rs690_fini,
+	.suspend = &rs690_suspend,
+	.resume = &rs690_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.bandwidth_update = &rs690_bandwidth_update,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r200_copy_dma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rv515_asic = {
+	.init = &rv515_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &rv515_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &rv515_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.bandwidth_update = &rv515_bandwidth_update,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic r520_asic = {
+	.init = &r520_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &r520_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r520_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &rv515_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic r600_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs780_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs690_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &rs780_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rv770_asic = {
+	.init = &rv770_init,
+	.fini = &rv770_fini,
+	.suspend = &rv770_suspend,
+	.resume = &rv770_resume,
+	.asic_reset = &r600_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &rv770_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &rv770_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rv770_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rv770_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic evergreen_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic sumo_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic btc_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
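+/*
+ * CAYMAN.  First table with a .vm block (per-process GPU virtual
+ * memory) and five rings: the GFX ring, two compute rings (CP1/CP2)
+ * and two async DMA rings, each with its own lockup check and flush.
+ */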
+static struct radeon_asic cayman_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
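+/*
+ * ARUBA (Trinity APUs).  Reuses the cayman callbacks but with the DCE6
+ * display bandwidth code and, being an IGP, the sumo PM profile and no
+ * memory reclocking.
+ */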
+static struct radeon_asic trinity_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
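+/*
+ * TAHITI/PITCAIRN/VERDE (Southern Islands).  Note the NULL .cs_parse
+ * on every ring (no command-stream checker for SI here) and the NULL
+ * .blit hook: buffer copies go through the async DMA ring instead.
+ */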
+static struct radeon_asic si_asic = {
+	.init = &si_init,
+	.fini = &si_fini,
+	.suspend = &si_suspend,
+	.resume = &si_resume,
+	.asic_reset = &si_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &si_vm_init,
+		.fini = &si_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &si_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
+		}
+	},
+	.irq = {
+		.set = &si_irq_set,
+		.process = &si_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = NULL,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &si_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &si_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
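+/*
+ * Note: nothing calls these table entries directly.  In the upstream
+ * radeon driver the vectors are consumed through thin wrapper macros
+ * in radeon.h, along the lines of (illustrative sketch only):
+ *
+ *	#define radeon_init(rdev)  (rdev)->asic->init((rdev))
+ *	#define radeon_ring_ib_execute(rdev, r, ib) \
+ *		(rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+ *
+ * so the structs above are the single dispatch point per chip family,
+ * selected once by radeon_asic_init() below.
+ */
+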
+/**
+ * radeon_asic_init - register asic specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate ASIC-specific callbacks for each
+ * chip family.  Also sets other ASIC-specific info, such as the
+ * number of CRTCs and the register aperture accessors (all ASICs).
+ * Returns 0 for success.
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+	radeon_register_accessor_init(rdev);
+
+	/* set the number of crtcs */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		rdev->num_crtc = 1;
+	else
+		rdev->num_crtc = 2;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+		rdev->asic = &r100_asic;
+		break;
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+		rdev->asic = &r200_asic;
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+		if (rdev->flags & RADEON_IS_PCIE)
+			rdev->asic = &r300_asic_pcie;
+		else
+			rdev->asic = &r300_asic;
+		break;
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		rdev->asic = &r420_asic;
+	/* handle Macs (no video BIOS; fall back to legacy clock/backlight handling) */
+		if (rdev->bios == NULL) {
+			rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
+			rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
+			rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
+			rdev->asic->pm.set_memory_clock = NULL;
+			rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
+		}
+		break;
+	case CHIP_RS400:
+	case CHIP_RS480:
+		rdev->asic = &rs400_asic;
+		break;
+	case CHIP_RS600:
+		rdev->asic = &rs600_asic;
+		break;
+	case CHIP_RS690:
+	case CHIP_RS740:
+		rdev->asic = &rs690_asic;
+		break;
+	case CHIP_RV515:
+		rdev->asic = &rv515_asic;
+		break;
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		rdev->asic = &r520_asic;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RV670:
+		rdev->asic = &r600_asic;
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->asic = &rs780_asic;
+		break;
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		rdev->asic = &rv770_asic;
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CEDAR)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &evergreen_asic;
+		break;
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		rdev->asic = &sumo_asic;
+		break;
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CAICOS)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &btc_asic;
+		break;
+	case CHIP_CAYMAN:
+		rdev->asic = &cayman_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 6;
+		break;
+	case CHIP_ARUBA:
+		rdev->asic = &trinity_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 4;
+		break;
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+		rdev->asic = &si_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 6;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->asic->pm.get_memory_clock = NULL;
+		rdev->asic->pm.set_memory_clock = NULL;
+	}
+
+	return 0;
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_asic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_asic.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_asic.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_asic.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,533 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_asic.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+/*
+ * r100,rv100,rs100,rv200,rs200
+ */
+struct r100_mc_save {
+	u32	GENMO_WT;
+	u32	CRTC_EXT_CNTL;
+	u32	CRTC_GEN_CNTL;
+	u32	CRTC2_GEN_CNTL;
+	u32	CUR_OFFSET;
+	u32	CUR2_OFFSET;
+};
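+
+/*
+ * Scratch state used by r100_mc_stop()/r100_mc_resume() (declared
+ * below) to save and restore display controller registers while the
+ * memory controller is being reprogrammed.
+ */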
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
+void r100_vga_set_state(struct radeon_device *rdev, bool state);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r100_asic_reset(struct radeon_device *rdev);
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+int r100_irq_set(struct radeon_device *rdev);
+irqreturn_t r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+int r100_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_bandwidth_update(struct radeon_device *rdev);
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+			 uint64_t src_offset,
+			 uint64_t dst_offset,
+			 unsigned num_gpu_pages,
+			 struct radeon_fence **fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
+
+/*
+ * r300,r350,rv350,rv380
+ */
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+				struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/*
+ * r420,r423,rv410
+ */
+extern int r420_init(struct radeon_device *rdev);
+extern void r420_fini(struct radeon_device *rdev);
+extern int r420_suspend(struct radeon_device *rdev);
+extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
+
+/*
+ * rs400,rs480
+ */
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs600
+ */
+extern int rs600_asic_reset(struct radeon_device *rdev);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+irqreturn_t rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs690,rs740
+ */
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+					struct drm_display_mode *mode1,
+					struct drm_display_mode *mode2);
+extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rv515
+ */
+struct rv515_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[2];
+};
+
+int rv515_init(struct radeon_device *rdev);
+void rv515_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void rv515_bandwidth_update(struct radeon_device *rdev);
+int rv515_resume(struct radeon_device *rdev);
+int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+int r520_init(struct radeon_device *rdev);
+int r520_resume(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
+ */
+int r600_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+int r600_suspend(struct radeon_device *rdev);
+int r600_resume(struct radeon_device *rdev);
+void r600_vga_set_state(struct radeon_device *rdev, bool state);
+int r600_wb_init(struct radeon_device *rdev);
+void r600_wb_fini(struct radeon_device *rdev);
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_asic_reset(struct radeon_device *rdev);
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages, struct radeon_fence **fence);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+void r600_fini_microcode(struct radeon_device *rdev);
+/* r600 irq */
+irqreturn_t r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
+void r600_audio_fini(struct radeon_device *rdev);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+			   struct radeon_fence **fence, struct radeon_sa_bo **vb,
+			   struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+			 struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+			u64 src_gpu_addr, u64 dst_gpu_addr,
+			unsigned num_gpu_pages,
+			struct radeon_sa_bo *vb);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
+
+/*
+ * rv770,rv730,rv710,rv740
+ */
+int rv770_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+int rv770_suspend(struct radeon_device *rdev);
+int rv770_resume(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
+
+/*
+ * evergreen
+ */
+struct evergreen_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[RADEON_MAX_CRTCS];
+};
+
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int evergreen_asic_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+irqreturn_t evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence);
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+void evergreen_mc_program(struct radeon_device *rdev);
+int evergreen_mc_init(struct radeon_device *rdev);
+void evergreen_irq_suspend(struct radeon_device *rdev);
+
+/*
+ * cayman
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence);
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
+
+/* DCE6 - SI */
+void dce6_bandwidth_update(struct radeon_device *rdev);
+
+/*
+ * si
+ */
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence);
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int si_init(struct radeon_device *rdev);
+void si_fini(struct radeon_device *rdev);
+int si_suspend(struct radeon_device *rdev);
+int si_resume(struct radeon_device *rdev);
+bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int si_asic_reset(struct radeon_device *rdev);
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_irq_set(struct radeon_device *rdev);
+irqreturn_t si_irq_process(struct radeon_device *rdev);
+int si_vm_init(struct radeon_device *rdev);
+void si_vm_fini(struct radeon_device *rdev);
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_rlc_fini(struct radeon_device *rdev);
+int si_rlc_init(struct radeon_device *rdev);
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_asic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_atombios.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_atombios.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_atombios.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3226 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_atombios.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h" /* Declares several prototypes; clang is pleased. */
+
+#include "atom.h"
+#include "atom-bits.h"
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			uint32_t supported_device, u16 caps);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+#endif
+
+/* local */
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+				    u16 voltage_id, u16 *voltage);
+
+union atom_supported_devices {
+	struct _ATOM_SUPPORTED_DEVICES_INFO info;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+					  ATOM_GPIO_I2C_ASSIGMENT *gpio,
+					  u8 index)
+{
+	/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+	if ((rdev->family == CHIP_R420) ||
+	    (rdev->family == CHIP_R423) ||
+	    (rdev->family == CHIP_RV410)) {
+		if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+			gpio->ucClkMaskShift = 0x19;
+			gpio->ucDataMaskShift = 0x18;
+		}
+	}
+
+	/* some evergreen boards have bad data for this entry */
+	if (ASIC_IS_DCE4(rdev)) {
+		if ((index == 7) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+		    (gpio->sucI2cId.ucAccess == 0)) {
+			gpio->sucI2cId.ucAccess = 0x97;
+			gpio->ucDataMaskShift = 8;
+			gpio->ucDataEnShift = 8;
+			gpio->ucDataY_Shift = 8;
+			gpio->ucDataA_Shift = 8;
+		}
+	}
+
+	/* some DCE3 boards have bad data for this entry */
+	if (ASIC_IS_DCE3(rdev)) {
+		if ((index == 4) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+		    (gpio->sucI2cId.ucAccess == 0x94))
+			gpio->sucI2cId.ucAccess = 0x14;
+	}
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+	struct radeon_i2c_bus_rec i2c;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
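+	/* The ATOM table stores register locations as DWORD indices;
+	 * scale by 4 to get the byte offsets the register accessors
+	 * expect. */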
+	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+		i2c.hw_capable = true;
+	else
+		i2c.hw_capable = false;
+
+	if (gpio->sucI2cId.ucAccess == 0xa0)
+		i2c.mm_i2c = true;
+	else
+		i2c.mm_i2c = false;
+
+	i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+	if (i2c.mask_clk_reg)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+	i2c.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)((char *)ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
+
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+				break;
+			}
+		}
+	}
+
+	return i2c;
+}
+
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+	char stmp[32];
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)((char *)ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
+
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+
+			if (i2c.valid) {
+				sprintf(stmp, "0x%x", i2c.i2c_id);
+				rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+			}
+		}
+	}
+}
+
+static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+							u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)((char *)ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			pin = &gpio_info->asGPIO_Pin[i];
+			if (id == pin->ucGPIO_ID) {
+				gpio.id = pin->ucGPIO_ID;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+				gpio.mask = (1 << pin->ucGpioPinBitShift);
+				gpio.valid = true;
+				break;
+			}
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	u32 reg;
+
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
+	if (ASIC_IS_DCE6(rdev))
+		reg = SI_DC_GPIO_HPD_A;
+	else if (ASIC_IS_DCE4(rdev))
+		reg = EVERGREEN_DC_GPIO_HPD_A;
+	else
+		reg = AVIVO_DC_GPIO_HPD_A;
+
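+	/* When the GPIO lives in the HPD register, its mask bit
+	 * position identifies the hotplug pin. */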
+	hpd.gpio = *gpio;
+	if (gpio->reg == reg) {
+		switch(gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
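+/*
+ * Per-board fixups for connector records parsed out of the BIOS.
+ * Returning false drops the record entirely; returning true keeps it,
+ * possibly with a rewritten connector type, line mux or i2c bus.
+ */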
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+				     uint32_t supported_device,
+				     int *connector_type,
+				     struct radeon_i2c_bus_rec *i2c_bus,
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
+{
+
+	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+	if ((dev->pci_device == 0x791e) &&
+	    (dev->pci_subvendor == 0x1043) &&
+	    (dev->pci_subdevice == 0x826d)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pci_device == 0x7941) &&
+	    (dev->pci_subvendor == 0x1849) &&
+	    (dev->pci_subdevice == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pci_device == 0x796e) &&
+	    (dev->pci_subvendor == 0x1462) &&
+	    (dev->pci_subdevice == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
+	/* Abit F-I90HD (reported by ciaranm on #radeonhd): this board has no DVI */
+	if ((dev->pci_device == 0x7941) &&
+	    (dev->pci_subvendor == 0x147b) &&
+	    (dev->pci_subdevice == 0x2412)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+			return false;
+	}
+
+	/* Falcon NW laptop lists the VGA DDC line for LVDS */
+	if ((dev->pci_device == 0x5653) &&
+	    (dev->pci_subvendor == 0x1462) &&
+	    (dev->pci_subdevice == 0x0291)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			i2c_bus->valid = false;
+			*line_mux = 53;
+		}
+	}
+
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pci_device == 0x7146) &&
+	    (dev->pci_subvendor == 0x17af) &&
+	    (dev->pci_subdevice == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pci_device == 0x7142) &&
+	    (dev->pci_subvendor == 0x1458) &&
+	    (dev->pci_subdevice == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Funky macbooks */
+	if ((dev->pci_device == 0x71C5) &&
+	    (dev->pci_subvendor == 0x106b) &&
+	    (dev->pci_subdevice == 0x0080)) {
+		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+			return false;
+		if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+			*line_mux = 0x90;
+	}
+
+	/* mac rv630, rv730, others */
+	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+	}
+
+	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
+	if ((dev->pci_device == 0x9598) &&
+	    (dev->pci_subvendor == 0x1043) &&
+	    (dev->pci_subdevice == 0x01da)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3600 board lists the DVI port as HDMI */
+	if ((dev->pci_device == 0x9598) &&
+	    (dev->pci_subvendor == 0x1043) &&
+	    (dev->pci_subdevice == 0x01e4)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3450 board lists the DVI port as HDMI */
+	if ((dev->pci_device == 0x95C5) &&
+	    (dev->pci_subvendor == 0x1043) &&
+	    (dev->pci_subdevice == 0x01e2)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* Some BIOSes report a DAC on HDMI; usually this is a board with
+	 * HDMI + VGA where the VGA port is listed as HDMI.
+	 */
+	if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+			*connector_type = DRM_MODE_CONNECTOR_VGA;
+			*line_mux = 0;
+		}
+	}
+
+	/* The Acer TravelMate 5730/5730G has an HDMI port on the laptop
+	 * and a DVI port on the docking station, and both share the same
+	 * encoder, HPD pin, and DDC line.  While the BIOS table is
+	 * technically correct, we drop the DVI port here: xrandr has no
+	 * concept of encoders and will try to drive both connectors with
+	 * different CRTCs, which the hardware cannot do and which would
+	 * leave no CRTCs for LVDS or VGA.
+	 */
+	if (((dev->pci_device == 0x95c4) || (dev->pci_device == 0x9591)) &&
+	    (dev->pci_subvendor == 0x1025) &&
+	    (dev->pci_subdevice == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+			/* actually it's a DVI-D port not DVI-I */
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+			return false;
+		}
+	}
+
+	/* XFX Pine Group device rv730 reports no VGA DDC lines
+	 * even though they are wired up to record 0x93
+	 */
+	if ((dev->pci_device == 0x9498) &&
+	    (dev->pci_subvendor == 0x1682) &&
+	    (dev->pci_subdevice == 0x2452) &&
+	    (i2c_bus->valid == false) &&
+	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+		struct radeon_device *rdev = dev->dev_private;
+		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+	}
+
+	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+	if (((dev->pci_device == 0x9802) || (dev->pci_device == 0x9806)) &&
+	    (dev->pci_subvendor == 0x1734) &&
+	    (dev->pci_subdevice == 0x11bd)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+			*line_mux = 0x3103;
+		} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	return true;
+}
+
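+/* Conversion tables from ATOM connector indices to DRM connector types
+ * and ATOM connector object IDs.
+ */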
+const int supported_devices_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVIA,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_DisplayPort
+};
+
+const uint16_t supported_devices_connector_object_id_convert[] = {
+	CONNECTOR_OBJECT_ID_NONE,
+	CONNECTOR_OBJECT_ID_VGA,
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+	CONNECTOR_OBJECT_ID_COMPOSITE,
+	CONNECTOR_OBJECT_ID_SVIDEO,
+	CONNECTOR_OBJECT_ID_LVDS,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_DISPLAYPORT,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+	CONNECTOR_OBJECT_ID_SVIDEO
+};
+
+const int object_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DisplayPort,
+	DRM_MODE_CONNECTOR_eDP,
+	DRM_MODE_CONNECTOR_Unknown
+};
+
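+/* Parse the ATOM Object_Header display path table: for each supported
+ * display path, resolve the connector type, encoders, routers, DDC bus
+ * and HPD pin, apply the quirk table, then register the connector with
+ * the DRM core.
+ */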
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
+	ATOM_OBJECT_TABLE *router_obj;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+	int i, j, k, path_size, device_support;
+	int connector_type;
+	u16 igp_lane_info, conn_id, connector_object_id;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_router router;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) ((char *)ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    ((char *)ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+	    ((char *)ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    ((char *)ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
+	router_obj = (ATOM_OBJECT_TABLE *)
+		((char *)ctx->bios + data_offset +
+		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
+	device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
+	path_size = 0;
+	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+		ATOM_DISPLAY_OBJECT_PATH *path;
+		addr += path_size;
+		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+		path_size += le16_to_cpu(path->usSize);
+
+		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+			uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+			con_obj_id =
+			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+			    >> OBJECT_ID_SHIFT;
+			con_obj_num =
+			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+			    >> ENUM_ID_SHIFT;
+			con_obj_type =
+			    (le16_to_cpu(path->usConnObjectId) &
+			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+			/* TODO CV support */
+			if (le16_to_cpu(path->usDeviceTag) ==
+				ATOM_DEVICE_CV_SUPPORT)
+				continue;
+
+			/* IGP chips */
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (con_obj_id ==
+			     CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+				uint16_t igp_offset = 0;
+				ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+				index =
+				    GetIndexIntoMasterTable(DATA,
+							    IntegratedSystemInfo);
+
+				if (atom_parse_data_header(ctx, index, &size, &frev,
+							   &crev, &igp_offset)) {
+
+					if (crev >= 2) {
+						igp_obj =
+							(ATOM_INTEGRATED_SYSTEM_INFO_V2
+							 *) ((char *)ctx->bios + igp_offset);
+
+						if (igp_obj) {
+							uint32_t slot_config, ct;
+
+							if (con_obj_num == 1)
+								slot_config =
+									igp_obj->
+									ulDDISlot1Config;
+							else
+								slot_config =
+									igp_obj->
+									ulDDISlot2Config;
+
+							ct = (slot_config >> 16) & 0xff;
+							connector_type =
+								object_connector_convert
+								[ct];
+							connector_object_id = ct;
+							igp_lane_info =
+								slot_config & 0xffff;
+						} else
+							continue;
+					} else
+						continue;
+				} else {
+					igp_lane_info = 0;
+					connector_type =
+						object_connector_convert[con_obj_id];
+					connector_object_id = con_obj_id;
+				}
+			} else {
+				igp_lane_info = 0;
+				connector_type =
+				    object_connector_convert[con_obj_id];
+				connector_object_id = con_obj_id;
+			}
+
+			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+				continue;
+
+			router.ddc_valid = false;
+			router.cd_valid = false;
+			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+
+				grph_obj_id =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+				grph_obj_num =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+				grph_obj_type =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								((char *)ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record = (ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							radeon_add_atom_encoder(dev,
+										encoder_obj,
+										le16_to_cpu
+										(path->
+										 usDeviceTag),
+										caps);
+						}
+					}
+				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								((char *)ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+							ATOM_I2C_RECORD *i2c_record;
+							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
+							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+								((char *)ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+							int enum_id;
+
+							router.router_id = router_obj_id;
+							for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
+							     enum_id++) {
+								if (le16_to_cpu(path->usConnObjectId) ==
+								    le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+									break;
+							}
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_I2C_RECORD_TYPE:
+									i2c_record =
+										(ATOM_I2C_RECORD *)
+										record;
+									i2c_config =
+										(ATOM_I2C_ID_CONFIG_ACCESS *)
+										&i2c_record->sucI2cId;
+									router.i2c_info =
+										radeon_lookup_i2c_gpio(rdev,
+												       i2c_config->
+												       ucAccess);
+									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+									break;
+								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+										record;
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+						}
+					}
+				}
+			}
+
+			/* look up the GPIO for DDC and HPD */
+			ddc_bus.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			if ((le16_to_cpu(path->usDeviceTag) &
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+					if (le16_to_cpu(path->usConnObjectId) ==
+					    le16_to_cpu(con_obj->asObjects[j].
+							usObjectID)) {
+						ATOM_COMMON_RECORD_HEADER
+						    *record =
+						    (ATOM_COMMON_RECORD_HEADER
+						     *)
+						    ((char *)ctx->bios + data_offset +
+						     le16_to_cpu(con_obj->
+								 asObjects[j].
+								 usRecordOffset));
+						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+
+						while (record->ucRecordSize > 0 &&
+						       record->ucRecordType > 0 &&
+						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+							switch (record->ucRecordType) {
+							case ATOM_I2C_RECORD_TYPE:
+								i2c_record =
+								    (ATOM_I2C_RECORD *)
+									record;
+								i2c_config =
+									(ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->
+												 ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+									(ATOM_HPD_INT_RECORD *)
+									record;
+								gpio = radeon_lookup_gpio(rdev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
+								break;
+							}
+							record =
+							    (ATOM_COMMON_RECORD_HEADER
+							     *) ((char *)record
+								 +
+								 record->
+								 ucRecordSize);
+						}
+						break;
+					}
+				}
+			}
+
+			/* needed for aux chan transactions */
+			ddc_bus.hpd = hpd.hpd;
+
+			conn_id = le16_to_cpu(path->usConnObjectId);
+
+			if (!radeon_atom_apply_quirks
+			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
+			     &ddc_bus, &conn_id, &hpd))
+				continue;
+
+			radeon_add_atom_connector(dev,
+						  conn_id,
+						  le16_to_cpu(path->
+							      usDeviceTag),
+						  connector_type, &ddc_bus,
+						  igp_lane_info,
+						  connector_object_id,
+						  &hpd,
+						  &router);
+
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
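+/* Derive a connector object ID for BIOSes without object tables.  DVI
+ * connectors backed by DFP2 consult XTMDS_Info to distinguish single
+ * from dual link; everything else uses the conversion table above.
+ */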
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+						 int connector_type,
+						 uint16_t devices)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	} else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+		    (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+		   (devices & ATOM_DEVICE_DFP2_SUPPORT))  {
+		struct radeon_mode_info *mode_info = &rdev->mode_info;
+		struct atom_context *ctx = mode_info->atom_context;
+		int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+		uint16_t size, data_offset;
+		uint8_t frev, crev;
+		ATOM_XTMDS_INFO *xtmds;
+
+		if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+			xtmds = (ATOM_XTMDS_INFO *)((char *)ctx->bios + data_offset);
+
+			if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			} else {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+			}
+		} else
+			return supported_devices_connector_object_id_convert
+				[connector_type];
+	} else {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	}
+}
+
+struct bios_connector {
+	bool valid;
+	uint16_t line_mux;
+	uint16_t devices;
+	int connector_type;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
+};
+
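+/* Connector parser for older BIOSes that only have the
+ * SupportedDevicesInfo table: walk the device entries, assign
+ * per-device DDC/HPD info, then merge connectors that share a line_mux
+ * (e.g. analog + digital into DVI-I) before registering them.
+ */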
+bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+								 drm_device
+								 *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+	uint16_t size, data_offset;
+	uint8_t frev, crev;
+	uint16_t device_support;
+	uint8_t dac;
+	union atom_supported_devices *supported_devices;
+	int i, j, max_device;
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+	struct radeon_router router;
+
+	router.ddc_valid = false;
+	router.cd_valid = false;
+
+	bios_connectors = malloc(bc_size, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!bios_connectors)
+		return false;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		free(bios_connectors, DRM_MEM_DRIVER);
+		return false;
+	}
+
+	supported_devices =
+	    (union atom_supported_devices *)((char *)ctx->bios + data_offset);
+
+	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
+		ATOM_CONNECTOR_INFO_I2C ci =
+		    supported_devices->info.asConnInfo[i];
+
+		bios_connectors[i].valid = false;
+
+		if (!(device_support & (1 << i))) {
+			continue;
+		}
+
+		if (i == ATOM_DEVICE_CV_INDEX) {
+			DRM_DEBUG_KMS("Skipping Component Video\n");
+			continue;
+		}
+
+		bios_connectors[i].connector_type =
+		    supported_devices_connector_convert[ci.sucConnectorInfo.
+							sbfAccess.
+							bfConnectorType];
+
+		if (bios_connectors[i].connector_type ==
+		    DRM_MODE_CONNECTOR_Unknown)
+			continue;
+
+		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
+
+		/* give tv unique connector ids */
+		if (i == ATOM_DEVICE_TV1_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 50;
+		} else if (i == ATOM_DEVICE_TV2_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 51;
+		} else if (i == ATOM_DEVICE_CV_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 52;
+		} else
+			bios_connectors[i].ddc_bus =
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
+
+		/* Always set the connector type to VGA for CRT1/CRT2.  If they
+		 * are shared with a DVI port, we'll pick up the DVI connector
+		 * when we merge the outputs.  Some BIOSes incorrectly list VGA
+		 * ports as DVI.
+		 */
+		if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+			bios_connectors[i].connector_type =
+			    DRM_MODE_CONNECTOR_VGA;
+
+		if (!radeon_atom_apply_quirks
+		    (dev, (1 << i), &bios_connectors[i].connector_type,
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
+			continue;
+
+		bios_connectors[i].valid = true;
+		bios_connectors[i].devices = (1 << i);
+
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+			radeon_add_atom_encoder(dev,
+						radeon_get_encoder_enum(dev,
+								      (1 << i),
+								      dac),
+						(1 << i),
+						0);
+		else
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									(1 << i),
+									dac),
+						  (1 << i));
+	}
+
+	/* combine shared connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			for (j = 0; j < max_device; j++) {
+				if (bios_connectors[j].valid && (i != j)) {
+					if (bios_connectors[i].line_mux ==
+					    bios_connectors[j].line_mux) {
+						/* make sure not to combine LVDS */
+						if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[i].line_mux = 53;
+							bios_connectors[i].ddc_bus.valid = false;
+							continue;
+						}
+						if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[j].line_mux = 53;
+							bios_connectors[j].ddc_bus.valid = false;
+							continue;
+						}
+						/* combine analog and digital for DVI-I */
+						if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+						    ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+							bios_connectors[i].devices |=
+								bios_connectors[j].devices;
+							bios_connectors[i].connector_type =
+								DRM_MODE_CONNECTOR_DVII;
+							if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
+								bios_connectors[i].hpd =
+									bios_connectors[j].hpd;
+							bios_connectors[j].valid = false;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/* add the connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			uint16_t connector_object_id =
+				atombios_get_connector_object_id(dev,
+						      bios_connectors[i].connector_type,
+						      bios_connectors[i].devices);
+			radeon_add_atom_connector(dev,
+						  bios_connectors[i].line_mux,
+						  bios_connectors[i].devices,
+						  bios_connectors[i].
+						  connector_type,
+						  &bios_connectors[i].ddc_bus,
+						  0,
+						  connector_object_id,
+						  &bios_connectors[i].hpd,
+						  &router);
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	free(bios_connectors, DRM_MEM_DRIVER);
+	return true;
+}
+
+union firmware_info {
+	ATOM_FIRMWARE_INFO info;
+	ATOM_FIRMWARE_INFO_V1_2 info_12;
+	ATOM_FIRMWARE_INFO_V1_3 info_13;
+	ATOM_FIRMWARE_INFO_V1_4 info_14;
+	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
+};
+
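+/* Fill rdev->clock from the FirmwareInfo table: reference clocks and
+ * PLL input/output limits for the pixel, engine and memory PLLs, plus
+ * the default engine/memory/display clocks.  Zero entries fall back to
+ * per-ASIC defaults.
+ */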
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	union firmware_info *firmware_info;
+	uint8_t frev, crev;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint16_t data_offset;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)((char *)mode_info->atom_context->bios +
+						data_offset);
+		/* pixel clocks */
+		p1pll->reference_freq =
+		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		p1pll->reference_div = 0;
+
+		if (crev < 2)
+			p1pll->pll_out_min =
+				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+		else
+			p1pll->pll_out_min =
+				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+		p1pll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+		if (crev >= 4) {
+			p1pll->lcd_pll_out_min =
+				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_min == 0)
+				p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max =
+				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_max == 0)
+				p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		} else {
+			p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		}
+
+		if (p1pll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				p1pll->pll_out_min = 64800;
+			else
+				p1pll->pll_out_min = 20000;
+		}
+
+		p1pll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+		p1pll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+		*p2pll = *p1pll;
+
+		/* system clock */
+		if (ASIC_IS_DCE4(rdev))
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		else
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		spll->reference_div = 0;
+
+		spll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+		spll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+		/* ??? */
+		if (spll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				spll->pll_out_min = 64800;
+			else
+				spll->pll_out_min = 20000;
+		}
+
+		spll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+		spll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+		/* memory clock */
+		if (ASIC_IS_DCE4(rdev))
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		else
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		mpll->reference_div = 0;
+
+		mpll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+		mpll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+		/* ??? */
+		if (mpll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				mpll->pll_out_min = 64800;
+			else
+				mpll->pll_out_min = 20000;
+		}
+
+		mpll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+		mpll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+		rdev->clock.default_sclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+		rdev->clock.default_mclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+		if (ASIC_IS_DCE4(rdev)) {
+			rdev->clock.default_dispclk =
+				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+			if (rdev->clock.default_dispclk == 0) {
+				if (ASIC_IS_DCE5(rdev))
+					rdev->clock.default_dispclk = 54000; /* 540 MHz */
+				else
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+			}
+			rdev->clock.dp_extclk =
+				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+		}
+		*dcpll = *p1pll;
+
+		rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+		if (rdev->clock.max_pixel_clock == 0)
+			rdev->clock.max_pixel_clock = 40000;
+
+		/* not technically a clock, but... */
+		rdev->mode_info.firmware_flags =
+			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
+		return true;
+	}
+
+	return false;
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+};
+
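+/* Detect dedicated sideport memory on IGPs: a non-zero boot-up
+ * (side port) memory clock in IntegratedSystemInfo indicates sideport
+ * is present.  RS600 is excluded up front.
+ */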
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	/* sideport is AMD only */
+	if (rdev->family == CHIP_RS600)
+		return false;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)((char *)mode_info->atom_context->bios +
+				      data_offset);
+		switch (crev) {
+		case 1:
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
+				return true;
+			break;
+		case 2:
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
+				return true;
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+	}
+	return false;
+}
+
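+/* Fill the internal TMDS PLL table (charge pump, VCO gain, duty cycle
+ * and voltage swing per frequency step) from TMDS_Info; the entry at
+ * the maximum frequency is flagged with 0xffffffff.
+ */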
+bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+				   struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+	uint16_t data_offset;
+	struct _ATOM_TMDS_INFO *tmds_info;
+	uint8_t frev, crev;
+	uint16_t maxfreq;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		tmds_info =
+			(struct _ATOM_TMDS_INFO *)((char *)mode_info->atom_context->bios +
+						   data_offset);
+
+		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+		for (i = 0; i < 4; i++) {
+			tmds->tmds_pll[i].freq =
+			    le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+			tmds->tmds_pll[i].value =
+			    tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_VCO_Gain & 0x3f) << 6;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_DutyCycle & 0xf) << 12;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_VoltageSwing & 0xf) << 16;
+
+			DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
+				  tmds->tmds_pll[i].freq,
+				  tmds->tmds_pll[i].value);
+
+			if (maxfreq == tmds->tmds_pll[i].freq) {
+				tmds->tmds_pll[i].freq = 0xffffffff;
+				break;
+			}
+		}
+		return true;
+	}
+	return false;
+}
+
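+/* Look up spread spectrum settings for a pixel PLL by SS id in the
+ * PPLL_SS_Info table.
+ */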
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+	uint16_t data_offset, size;
+	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		ss_info =
+			(struct _ATOM_SPREAD_SPECTRUM_INFO *)((char *)mode_info->atom_context->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
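+/* Apply any IGP-specific spread spectrum overrides (DVI/HDMI/LVDS)
+ * from IntegratedSystemInfo v6/v7 on top of the values already found.
+ */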
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+						 struct radeon_atom_ss *ss,
+						 int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any IGP-specific overrides */
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			((char *)mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
+union asic_ss_info {
+	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
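+/* Look up ASIC internal spread spectrum settings for a given clock type
+ * and target clock; the entry layout differs between table revisions 1,
+ * 2 and 3, and revision 3 additionally applies the IGP overrides.
+ */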
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id, u32 clock)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	uint16_t data_offset, size;
+	union asic_ss_info *ss_info;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+
+		ss_info =
+			(union asic_ss_info *)((char *)mode_info->atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+					return true;
+				}
+			}
+			break;
+		case 2:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					return true;
+				}
+			}
+			break;
+		case 3:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					if (rdev->flags & RADEON_IS_IGP)
+						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
+					return true;
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+			break;
+		}
+
+	}
+	return false;
+}
+
+union lvds_info {
+	struct _ATOM_LVDS_INFO info;
+	struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
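+/* Build the panel's native mode from the LVDS_Info timings, decode the
+ * sync/interlace flags, and walk the LCD patch records for a fake EDID
+ * or a corrected panel resolution.
+ */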
+struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+							      radeon_encoder
+							      *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+	uint16_t data_offset, misc;
+	union lvds_info *lvds_info;
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		lvds_info =
+			(union lvds_info *)((char *)mode_info->atom_context->bios + data_offset);
+		lvds =
+		    malloc(sizeof(struct radeon_encoder_atom_dig),
+			DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+		if (!lvds)
+			return NULL;
+
+		lvds->native_mode.clock =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+		lvds->native_mode.hdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+		lvds->native_mode.vdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+		lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+		lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+		lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+		lvds->panel_pwr_delay =
+		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
+		lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
+		/* set crtc values */
+		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+		lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
+
+		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
+		/* parse the lcd record table */
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record;
+
+			if ((frev == 1) && (crev < 2))
+				/* absolute */
+				record = (u8 *)((char *)mode_info->atom_context->bios +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			else
+				/* relative */
+				record = (u8 *)((char *)mode_info->atom_context->bios +
+						data_offset +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = malloc(edid_size, DRM_MEM_KMS, M_NOWAIT);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid)) {
+								rdev->mode_info.bios_hardcoded_edid = edid;
+								rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+							} else
+								free(edid, DRM_MEM_KMS);
+						}
+					}
+					record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					lvds->native_mode.width_mm = panel_res_record->usHSize;
+					lvds->native_mode.height_mm = panel_res_record->usVSize;
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
+	}
+	return lvds;
+}
+
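+/* Read the DAC1 background/DAC adjustment pair from the
+ * CompassionateData table.
+ */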
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			((char *)mode_info->atom_context->bios + data_offset);
+
+		p_dac = malloc(sizeof(struct radeon_encoder_primary_dac),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+		if (!p_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC1_BG_Adjustment;
+		dac = dac_info->ucDAC1_DAC_Adjustment;
+		p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+	}
+	return p_dac;
+}
+
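+/* Fetch analog TV CRTC timings from AnalogTV_Info.  Revision 1 tables
+ * carry explicit CRTC values (with an off-by-one fixup for the PAL
+ * totals); revision 2 tables use DTD-format entries.
+ */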
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	ATOM_ANALOG_TV_INFO *tv_info;
+	ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2;
+	ATOM_DTD_FORMAT *dtd_timings;
+	int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	u8 frev, crev;
+	u16 data_offset, misc;
+
+	if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+				    &frev, &crev, &data_offset))
+		return false;
+
+	switch (crev) {
+	case 1:
+		tv_info = (ATOM_ANALOG_TV_INFO *)((char *)mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING)
+			return false;
+
+		mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+		mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+		mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+		mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+		mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+		mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+		mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+
+		if (index == 1) {
+			/* PAL timings appear to have wrong values for totals */
+			mode->crtc_htotal -= 1;
+			mode->crtc_vtotal -= 1;
+		}
+		break;
+	case 2:
+		tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)((char *)mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
+			return false;
+
+		dtd_timings = &tv_info_v1_2->aModeTimings[index];
+		mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHBlanking_Time);
+		mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+		mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHSyncOffset);
+		mode->crtc_hsync_end = mode->crtc_hsync_start +
+			le16_to_cpu(dtd_timings->usHSyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVBlanking_Time);
+		mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+		mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVSyncOffset);
+		mode->crtc_vsync_end = mode->crtc_vsync_start +
+			le16_to_cpu(dtd_timings->usVSyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+		break;
+	}
+	return true;
+}
+
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	struct _ATOM_ANALOG_TV_INFO *tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+			((char *)mode_info->atom_context->bios + data_offset);
+
+		switch (tv_info->ucTV_BootUpDefaultStandard) {
+		case ATOM_TV_NTSC:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+			break;
+		case ATOM_TV_NTSCJ:
+			tv_std = TV_STD_NTSC_J;
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+			break;
+		case ATOM_TV_PAL:
+			tv_std = TV_STD_PAL;
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
+			break;
+		case ATOM_TV_PALM:
+			tv_std = TV_STD_PAL_M;
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+			break;
+		case ATOM_TV_PALN:
+			tv_std = TV_STD_PAL_N;
+			DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
+			break;
+		case ATOM_TV_PALCN:
+			tv_std = TV_STD_PAL_CN;
+			DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
+			break;
+		case ATOM_TV_PAL60:
+			tv_std = TV_STD_PAL_60;
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+			break;
+		case ATOM_TV_SECAM:
+			tv_std = TV_STD_SECAM;
+			DRM_DEBUG_KMS("Default TV standard: SECAM\n");
+			break;
+		default:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+			break;
+		}
+	}
+	return tv_std;
+}
+
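+/* Read the DAC2 adjustment pairs (PS2, PAL, NTSC) from
+ * CompassionateData and record the default TV standard.
+ */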
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			((char *)mode_info->atom_context->bios + data_offset);
+
+		tv_dac = malloc(sizeof(struct radeon_encoder_tv_dac),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+		if (!tv_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+		dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+		tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+		dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+		tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	}
+	return tv_dac;
+}
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"asc7xxx",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"RV6xx",
+	"RV770",
+	"adt7473",
+	"NONE",
+	"External GPIO",
+	"Evergreen",
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
+	"Southern Islands",
+	"lm96163",
+};
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
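+/* Classify a power state from its misc flags.  Later checks
+ * deliberately override earlier ones (order matters), and the
+ * driver-default mode becomes the default power state.
+ */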
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+						 int state_index,
+						 u32 misc, u32 misc2)
+{
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	/* order matters! */
+	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_POWERSAVE;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	}
+	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[0];
+	} else if (state_index == 0) {
+		rdev->pm.power_state[state_index].clock_info[0].flags |=
+			RADEON_PM_MODE_NO_DISPLAY;
+	}
+}
+
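+/* Parse the v1-v3 PowerPlayInfo tables into rdev->pm.power_state,
+ * registering the thermal/fan controller's I2C bus and skipping entries
+ * with a zero engine or memory clock.
+ */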
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	u32 misc, misc2 = 0;
+	int num_modes = 0, i;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)((char *)mode_info->atom_context->bios + data_offset);
+
+	/* add the i2c bus for thermal/fan chip */
+	if ((power_info->info.ucOverdriveThermalController > 0) &&
+	    (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
+		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+			 power_info->info.ucOverdriveControllerAddress >> 1);
+		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+#ifdef FREEBSD_WIP
+		if (rdev->pm.i2c_bus) {
+			struct i2c_board_info info = { };
+			const char *name = thermal_controller_names[power_info->info.
+								    ucOverdriveThermalController];
+			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+			strlcpy(info.type, name, sizeof(info.type));
+			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+		}
+#endif /* FREEBSD_WIP */
+	}
+	num_modes = power_info->info.ucNumOfPowerModeEntries;
+	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	if (num_modes == 0)
+		return state_index;
+	rdev->pm.power_state = malloc(sizeof(struct radeon_power_state) * num_modes,
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* The last mode is usually the default; the array is ordered low to high. */
+	for (i = 0; i < num_modes; i++) {
+		rdev->pm.power_state[state_index].clock_info =
+			malloc(sizeof(struct radeon_pm_clock_info) * 1,
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (!rdev->pm.power_state[state_index].clock_info)
+			return state_index;
+		rdev->pm.power_state[state_index].num_clock_modes = 1;
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+		switch (frev) {
+		case 1:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+			state_index++;
+			break;
+		case 2:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		case 3:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+						true;
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+				}
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		}
+	}
+	/* last mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[state_index - 1].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index - 1;
+		rdev->pm.power_state[state_index - 1].default_clock_mode =
+			&rdev->pm.power_state[state_index - 1].clock_info[0];
+		rdev->pm.power_state[state_index - 1].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+		rdev->pm.power_state[state_index - 1].misc = 0;
+		rdev->pm.power_state[state_index - 1].misc2 = 0;
+	}
+	return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+							 ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+		} else if ((controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+			DRM_INFO("Special thermal controller config\n");
+		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+#ifdef FREEBSD_WIP
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+#endif /* FREEBSD_WIP */
+		} else {
+			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+				 controller->ucType,
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		}
+	}
+}
+
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+						 u16 *vddc, u16 *vddci)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+
+	*vddc = 0;
+	*vddci = 0;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)((char *)mode_info->atom_context->bios +
+						data_offset);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
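+		/* the boot-up VDDCI voltage only exists in FirmwareInfo 2.2 and newer */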
+		if ((frev == 2) && (crev >= 2))
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+	}
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+						       int state_index, int mode_index,
+						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	int j;
+	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+	u16 vddc, vddci;
+
+	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
+
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
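+	/* the encoded PCIe link width appears to store (lanes - 1), hence the +1 */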
+	rdev->pm.power_state[state_index].pcie_lanes =
+		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	}
+	rdev->pm.power_state[state_index].flags = 0;
+	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		rdev->pm.power_state[state_index].flags |=
+			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+		if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+			rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
+		} else {
+			/* patch the table values with the default sclk/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[j].sclk =
+					rdev->clock.default_sclk;
+				if (vddc)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+						vddc;
+			}
+		}
+	}
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+						   int state_index, int mode_index,
+						   union pplib_clock_info *clock_info)
+{
+	u32 sclk, mclk;
+	u16 vddc;
+
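+	/* sclk/mclk are stored as a 16-bit low word plus an 8-bit high byte; reassemble them per table variant */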
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family >= CHIP_PALM) {
+			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		} else {
+			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		}
+	} else if (ASIC_IS_DCE6(rdev)) {
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->si.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->si.usVDDCI);
+	} else if (ASIC_IS_DCE4(rdev)) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->evergreen.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->evergreen.usVDDCI);
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->r600.usVDDC);
+	}
+
+	/* patch up vddc if necessary */
+	switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
+	case ATOM_VIRTUAL_VOLTAGE_ID0:
+	case ATOM_VIRTUAL_VOLTAGE_ID1:
+	case ATOM_VIRTUAL_VOLTAGE_ID2:
+	case ATOM_VIRTUAL_VOLTAGE_ID3:
+		if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
+					     rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
+					     &vddc) == 0)
+			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+		break;
+	default:
+		break;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* skip invalid modes */
+		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+			return false;
+	} else {
+		/* skip invalid modes */
+		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+			return false;
+	}
+	return true;
+}
+
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)((char *)mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	if (power_info->pplib.ucNumStates == 0)
+		return state_index;
+	rdev->pm.power_state = malloc(sizeof(struct radeon_power_state) *
+				       power_info->pplib.ucNumStates,
+				       DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* first mode is usually default, followed by low to high */
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)
+			((char *)mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			((char *)mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
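+		/* a v1 state entry is one non-clock index byte followed by
+		 * (ucStateEntrySize - 1) clock state indices, so allocate that
+		 * many clock modes (at least one) */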
+		rdev->pm.power_state[i].clock_info = malloc(sizeof(struct radeon_pm_clock_info) *
+							     ((power_info->pplib.ucStateEntrySize - 1) ?
+							      (power_info->pplib.ucStateEntrySize - 1) : 1),
+							     DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					((char *)mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (power_state->v1.ucClockStateIndices[j] *
+					  power_info->pplib.ucClockInfoSize));
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, non_clock_array_index, clock_array_index;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)((char *)mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	state_array = (struct _StateArray *)
+		((char *)mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		((char *)mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		((char *)mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+	if (state_array->ucNumEntries == 0)
+		return state_index;
+	rdev->pm.power_state = malloc(sizeof(struct radeon_power_state) *
+				       state_array->ucNumEntries,
+				       DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!rdev->pm.power_state)
+		return state_index;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		rdev->pm.power_state[i].clock_info = malloc(sizeof(struct radeon_pm_clock_info) *
+							     (power_state->v2.ucNumDPMLevels ?
+							      power_state->v2.ucNumDPMLevels : 1),
+							     DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_state->v2.ucNumDPMLevels) {
+			for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+				clock_array_index = power_state->v2.clockInfoIndex[j];
+				clock_info = (union pplib_clock_info *)
+					&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
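+		/* step over this state entry: 2 header bytes (ucNumDPMLevels,
+		 * nonClockInfoIndex) plus one clock info index per DPM level */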
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	int state_index = 0;
+
+	rdev->pm.default_power_state_index = -1;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		switch (frev) {
+		case 1:
+		case 2:
+		case 3:
+			state_index = radeon_atombios_parse_power_table_1_3(rdev);
+			break;
+		case 4:
+		case 5:
+			state_index = radeon_atombios_parse_power_table_4_5(rdev);
+			break;
+		case 6:
+			state_index = radeon_atombios_parse_power_table_6(rdev);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (state_index == 0) {
+		rdev->pm.power_state = malloc(sizeof(struct radeon_power_state),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->pm.power_state) {
+			rdev->pm.power_state[0].clock_info =
+				malloc(sizeof(struct radeon_pm_clock_info) * 1,
+				    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->pm.power_state[0].clock_info) {
+				/* add the default mode */
+				rdev->pm.power_state[state_index].type =
+					POWER_STATE_TYPE_DEFAULT;
+				rdev->pm.power_state[state_index].num_clock_modes = 1;
+				rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+				rdev->pm.power_state[state_index].default_clock_mode =
+					&rdev->pm.power_state[state_index].clock_info[0];
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+				rdev->pm.power_state[state_index].pcie_lanes = 16;
+				rdev->pm.default_power_state_index = state_index;
+				rdev->pm.power_state[state_index].flags = 0;
+				state_index++;
+			}
+		}
+	}
+
+	rdev->pm.num_power_states = state_index;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	if (rdev->pm.default_power_state_index >= 0)
+		rdev->pm.current_vddc =
+			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	else
+		rdev->pm.current_vddc = 0;
+}
+
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+	args.ucEnable = enable;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+	GET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnEngineClock);
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+	GET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnMemoryClock);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+				  uint32_t eng_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+				  uint32_t mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union set_voltage {
+	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+	struct _SET_VOLTAGE_PARAMETERS v1;
+	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev, volt_index = voltage_level;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* 0xff01 is a flag rather than an actual voltage */
+	if (voltage_level == 0xff01)
+		return;
+
+	switch (crev) {
+	case 1:
+		args.v1.ucVoltageType = voltage_type;
+		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+		args.v1.ucVoltageIndex = volt_index;
+		break;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+				    u16 voltage_id, u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_2_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	/* let the bios control the backlight */
+	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+}
+
+void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
+}
+
+void radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
+}
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	else
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock) {
+		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
+		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	else
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+			bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CV connected\n");
+			bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+		} else {
+			DRM_DEBUG_KMS("CV disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CV_MASK;
+			bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_0_scratch |= ATOM_S0_LCD1;
+			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_LCD1;
+			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP1;
+			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP1;
+			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP2;
+			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP2;
+			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP3 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP3;
+			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+		} else {
+			DRM_DEBUG_KMS("DFP3 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP3;
+			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP4 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP4;
+			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+		} else {
+			DRM_DEBUG_KMS("DFP4 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP4;
+			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP5 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP5;
+			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+		} else {
+			DRM_DEBUG_KMS("DFP5 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP5;
+			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP6 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP6;
+			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+		} else {
+			DRM_DEBUG_KMS("DFP6 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP6;
+			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+}
+
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_3_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+	else
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
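+	/* each device owns a CRTC-select field in BIOS_3_SCRATCH; clear it,
+	 * then shift in the new crtc id */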
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 18);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 24);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 16);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 20);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 17);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 19);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 23);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 25);
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+	else
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_2_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_atombios.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_benchmark.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_benchmark.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_benchmark.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,258 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_benchmark.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_BENCHMARK_COPY_BLIT 1
+#define RADEON_BENCHMARK_COPY_DMA  0
+
+#define RADEON_BENCHMARK_ITERATIONS 1024
+#define RADEON_BENCHMARK_COMMON_MODES_N 17
+
+static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+				    uint64_t saddr, uint64_t daddr,
+				    int flag, int n)
+{
+	unsigned long start_jiffies;
+	unsigned long end_jiffies;
+	struct radeon_fence *fence = NULL;
+	int i, r;
+
+	start_jiffies = jiffies;
+	for (i = 0; i < n; i++) {
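+		/* the copy routines take the transfer size in GPU pages */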
+		switch (flag) {
+		case RADEON_BENCHMARK_COPY_DMA:
+			r = radeon_copy_dma(rdev, saddr, daddr,
+					    size / RADEON_GPU_PAGE_SIZE,
+					    &fence);
+			break;
+		case RADEON_BENCHMARK_COPY_BLIT:
+			r = radeon_copy_blit(rdev, saddr, daddr,
+					     size / RADEON_GPU_PAGE_SIZE,
+					     &fence);
+			break;
+		default:
+			DRM_ERROR("Unknown copy method\n");
+			r = -EINVAL;
+		}
+		if (r)
+			goto exit_do_move;
+		r = radeon_fence_wait(fence, false);
+		if (r)
+			goto exit_do_move;
+		radeon_fence_unref(&fence);
+	}
+	end_jiffies = jiffies;
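+	/* on success, reuse r to return the elapsed time in ms; callers treat negative values as errors */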
+	r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+	if (fence)
+		radeon_fence_unref(&fence);
+	return r;
+}
+
+static void radeon_benchmark_log_results(int n, unsigned size,
+					 unsigned int time,
+					 unsigned sdomain, unsigned ddomain,
+					 char *kind)
+{
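+	/* n moves of (size >> 10) kB in 'time' ms: kB per ms is numerically MB/s */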
+	unsigned int throughput = (n * (size >> 10)) / time;
+	DRM_INFO("radeon: %s %u bo moves of %u kB from"
+		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
+		 kind, n, size >> 10, sdomain, ddomain, time,
+		 throughput * 8, throughput);
+}
+
+static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+				  unsigned sdomain, unsigned ddomain)
+{
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
+	uint64_t saddr, daddr;
+	int r, n;
+	int time;
+
+	n = RADEON_BENCHMARK_ITERATIONS;
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+
+	/* r100 doesn't have dma engine so skip the test */
+	/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
+	/* skip it as well if domains are the same */
+	if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_DMA, n);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "dma");
+	}
+
+	if (rdev->asic->copy.blit) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_BLIT, n);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "blit");
+	}
+
+out_cleanup:
+	if (sobj) {
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
+	}
+	if (dobj) {
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
+	}
+
+	if (r) {
+		DRM_ERROR("Error while benchmarking BO move.\n");
+	}
+}
+
+void radeon_benchmark(struct radeon_device *rdev, int test_number)
+{
+	int i;
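+	/* framebuffer sizes in bytes for common display modes at 32 bits per pixel */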
+	int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
+		640 * 480 * 4,
+		720 * 480 * 4,
+		800 * 600 * 4,
+		848 * 480 * 4,
+		1024 * 768 * 4,
+		1152 * 768 * 4,
+		1280 * 720 * 4,
+		1280 * 800 * 4,
+		1280 * 854 * 4,
+		1280 * 960 * 4,
+		1280 * 1024 * 4,
+		1440 * 900 * 4,
+		1400 * 1050 * 4,
+		1680 * 1050 * 4,
+		1600 * 1200 * 4,
+		1920 * 1080 * 4,
+		1920 * 1200 * 4
+	};
+
+	switch (test_number) {
+	case 1:
+		/* simple test, VRAM to GTT and GTT to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+				      RADEON_GEM_DOMAIN_VRAM);
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 2:
+		/* simple test, VRAM to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 3:
+		/* GTT to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 4:
+		/* VRAM to GTT, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 5:
+		/* VRAM to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 6:
+		/* GTT to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 7:
+		/* VRAM to GTT, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 8:
+		/* VRAM to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+
+	default:
+		DRM_ERROR("Unknown benchmark\n");
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_benchmark.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_bios.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_bios.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_bios.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,751 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_bios.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * BIOS.
+ */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios.  On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+	struct drm_local_map bios_map;
+	uint8_t __iomem *bios;
+	resource_size_t vram_base;
+	resource_size_t size = 256 * 1024; /* ??? */
+
+	DRM_INFO("%s: ===> Try IGP's VRAM...\n", __func__);
+
+	if (!(rdev->flags & RADEON_IS_IGP))
+		if (!radeon_card_posted(rdev)) {
+			DRM_INFO("%s: not POSTed discrete card detected, skipping this method...\n",
+			    __func__);
+			return false;
+		}
+
+	rdev->bios = NULL;
+	vram_base = drm_get_resource_start(rdev->ddev, 0);
+	DRM_INFO("%s: VRAM base address: 0x%jx\n", __func__, (uintmax_t)vram_base);
+
+	bios_map.offset = vram_base;
+	bios_map.size   = size;
+	bios_map.type   = 0;
+	bios_map.flags  = 0;
+	bios_map.mtrr   = 0;
+	drm_core_ioremap(&bios_map, rdev->ddev);
+	if (bios_map.handle == NULL) {
+		DRM_INFO("%s: failed to ioremap\n", __func__);
+		return false;
+	}
+	bios = bios_map.handle;
+	size = bios_map.size;
+	DRM_INFO("%s: Map address: %p (%ju bytes)\n", __func__, bios, (uintmax_t)size);
+
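+	/* a valid expansion ROM image starts with the 0x55 0xaa signature */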
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		if (size == 0) {
+			DRM_INFO("%s: Incorrect BIOS size\n", __func__);
+		} else {
+			DRM_INFO("%s: Incorrect BIOS signature: 0x%02X%02X\n",
+			    __func__, bios[0], bios[1]);
+		}
+		drm_core_ioremapfree(&bios_map, rdev->ddev);
+		return false;
+	}
+	rdev->bios = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
+	if (rdev->bios == NULL) {
+		drm_core_ioremapfree(&bios_map, rdev->ddev);
+		return false;
+	}
+	memcpy_fromio(rdev->bios, bios, size);
+	drm_core_ioremapfree(&bios_map, rdev->ddev);
+	return true;
+}
+
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+	device_t vga_dev;
+	uint8_t __iomem *bios;
+	size_t size;
+
+	DRM_INFO("%s: ===> Try PCI Expansion ROM...\n", __func__);
+
+	vga_dev = device_get_parent(rdev->dev);
+	rdev->bios = NULL;
+	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
+	bios = vga_pci_map_bios(vga_dev, &size);
+	if (!bios) {
+		return false;
+	}
+	DRM_INFO("%s: Map address: %p (%zu bytes)\n", __func__, bios, size);
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		if (size == 0) {
+			DRM_INFO("%s: Incorrect BIOS size\n", __func__);
+		} else {
+			DRM_INFO("%s: Incorrect BIOS signature: 0x%02X%02X\n",
+			    __func__, bios[0], bios[1]);
+		}
+		vga_pci_unmap_bios(vga_dev, bios);
+		return false;
+	}
+	rdev->bios = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
+	if (rdev->bios == NULL) {
+		vga_pci_unmap_bios(vga_dev, bios);
+		return false;
+	}
+	memcpy(rdev->bios, bios, size);
+	vga_pci_unmap_bios(vga_dev, bios);
+	return true;
+}
+
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(ACPI_HANDLE atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	ACPI_STATUS status;
+	ACPI_OBJECT atrm_arg_elements[2], *obj;
+	ACPI_OBJECT_LIST atrm_arg;
+	ACPI_BUFFER buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.Count = 2;
+	atrm_arg.Pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].Type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].Integer.Value = offset;
+
+	atrm_arg_elements[1].Type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].Integer.Value = len;
+
+	status = AcpiEvaluateObject(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		DRM_ERROR("failed to evaluate ATRM got %s\n", AcpiFormatException(status));
+		return -ENODEV;
+	}
+
+	obj = (ACPI_OBJECT *)buffer.Pointer;
+	memcpy(bios+offset, obj->Buffer.Pointer, obj->Buffer.Length);
+	len = obj->Buffer.Length;
+	AcpiOsFree(buffer.Pointer);
+	return len;
+}
+
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	int ret;
+	int size = 256 * 1024;
+	int i;
+	device_t dev;
+	ACPI_HANDLE dhandle, atrm_handle;
+	ACPI_STATUS status;
+	bool found = false;
+
+	DRM_INFO("%s: ===> Try ATRM...\n", __func__);
+
+	/* ATRM is for the discrete card only */
+	if (rdev->flags & RADEON_IS_IGP) {
+		DRM_INFO("%s: IGP card detected, skipping this method...\n",
+		    __func__);
+		return false;
+	}
+
+#ifdef FREEBSD_WIP
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+#endif /* FREEBSD_WIP */
+	if ((dev = pci_find_class(PCIC_DISPLAY, PCIS_DISPLAY_VGA)) != NULL) {
+		DRM_INFO("%s: pci_find_class() found: %d:%d:%d:%d, vendor=%04x, device=%04x\n",
+		    __func__,
+		    pci_get_domain(dev),
+		    pci_get_bus(dev),
+		    pci_get_slot(dev),
+		    pci_get_function(dev),
+		    pci_get_vendor(dev),
+		    pci_get_device(dev));
+		DRM_INFO("%s: Get ACPI device handle\n", __func__);
+		dhandle = acpi_get_handle(dev);
+#ifdef FREEBSD_WIP
+		if (!dhandle)
+			continue;
+#endif /* FREEBSD_WIP */
+		if (!dhandle)
+			return false;
+
+		DRM_INFO("%s: Get ACPI handle for \"ATRM\"\n", __func__);
+		status = AcpiGetHandle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+#ifdef FREEBSD_WIP
+			break;
+#endif /* FREEBSD_WIP */
+		} else {
+			DRM_INFO("%s: Failed to get \"ATRM\" handle: %s\n",
+			    __func__, AcpiFormatException(status));
+		}
+	}
+
+	if (!found)
+		return false;
+
+	rdev->bios = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
+	if (!rdev->bios) {
+		DRM_ERROR("Unable to allocate bios\n");
+		return false;
+	}
+
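+	/* Fetch the image in ATRM_BIOS_PAGE chunks; a short read marks
+	 * the end of the ROM. */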
+	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+		DRM_INFO("%s: Call radeon_atrm_call()\n", __func__);
+		ret = radeon_atrm_call(atrm_handle,
+				       rdev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
+		if (ret < ATRM_BIOS_PAGE)
+			break;
+	}
+
+	if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		if (i == 0) {
+			DRM_INFO("%s: Incorrect BIOS size\n", __func__);
+		} else {
+			DRM_INFO("%s: Incorrect BIOS signature: 0x%02X%02X\n",
+			    __func__, rdev->bios[0], rdev->bios[1]);
+		}
+		free(rdev->bios, DRM_MEM_DRIVER);
+		return false;
+	}
+	return true;
+}
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 rom_cntl;
+	bool r;
+
+	DRM_INFO("%s: ===> Try disabled BIOS (ni)...\n", __func__);
+
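+	/* Save every register touched below so it can be restored once
+	 * the ROM has been read. */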
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t cg_spll_func_cntl = 0;
+	uint32_t cg_spll_status;
+	bool r;
+
+	DRM_INFO("%s: ===> Try disabled BIOS (r700)...\n", __func__);
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	if (rdev->family == CHIP_RV730) {
+		cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+		/* enable bypass mode */
+		WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+						R600_SPLL_BYPASS_EN));
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+		WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+	} else
+		WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	if (rdev->family == CHIP_RV730) {
+		WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+	}
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t general_pwrmgt;
+	uint32_t low_vid_lower_gpio_cntl;
+	uint32_t medium_vid_lower_gpio_cntl;
+	uint32_t high_vid_lower_gpio_cntl;
+	uint32_t ctxsw_vid_lower_gpio_cntl;
+	uint32_t lower_gpio_enable;
+	bool r;
+
+	DRM_INFO("%s: ===> Try disabled BIOS (r600)...\n", __func__);
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+	general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+	low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+	medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+	high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+	ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+	lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	WREG32(R600_ROM_CNTL,
+	       ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+		(1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+		R600_SCK_OVERWRITE));
+
+	WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+	       (low_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+	       (medium_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+	       (high_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+	       (ctxsw_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+	WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+	return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t gpiopad_a;
+	uint32_t gpiopad_en;
+	uint32_t gpiopad_mask;
+	bool r;
+
+	DRM_INFO("%s: ===> Try disabled BIOS (avivo)...\n", __func__);
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(RV370_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+	gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+	WREG32(RADEON_GPIOPAD_A, 0);
+	WREG32(RADEON_GPIOPAD_EN, 0);
+	WREG32(RADEON_GPIOPAD_MASK, 0);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(RV370_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+	WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+	WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+	return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t crtc_gen_cntl;
+	uint32_t crtc2_gen_cntl;
+	uint32_t crtc_ext_cntl;
+	uint32_t fp2_gen_cntl;
+	bool r;
+
+	DRM_INFO("%s: ===> Try disabled BIOS (legacy)...\n", __func__);
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	if (rdev->flags & RADEON_IS_PCIE)
+		bus_cntl = RREG32(RV370_BUS_CNTL);
+	else
+		bus_cntl = RREG32(RADEON_BUS_CNTL);
+	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+	crtc2_gen_cntl = 0;
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	fp2_gen_cntl = 0;
+
+#define	PCI_DEVICE_ID_ATI_RADEON_QY	0x5159
+
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	}
+
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+	else
+		WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+	/* Turn off mem requests and CRTC for both controllers */
+	WREG32(RADEON_CRTC_GEN_CNTL,
+	       ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+		(RADEON_CRTC_DISP_REQ_EN_B |
+		 RADEON_CRTC_EXT_DISP_EN)));
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL,
+		       ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+			RADEON_CRTC2_DISP_REQ_EN_B));
+	}
+	/* Turn off CRTC */
+	WREG32(RADEON_CRTC_EXT_CNTL,
+	       ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+		(RADEON_CRTC_SYNC_TRISTAT |
+		 RADEON_CRTC_DISPLAY_DIS)));
+
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+	}
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, bus_cntl);
+	else
+		WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	}
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	}
+	return r;
+}
+
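+/* Once the card has been posted its ROM may be disabled; IGPs keep a
+ * copy of the BIOS in VRAM, while discrete parts need the
+ * generation-specific register setup above to expose the ROM for reading. */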
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+	if (rdev->flags & RADEON_IS_IGP)
+		return igp_read_bios_from_vram(rdev);
+	else if (rdev->family >= CHIP_BARTS)
+		return ni_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RV770)
+		return r700_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_R600)
+		return r600_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RS600)
+		return avivo_read_disabled_bios(rdev);
+	else
+		return legacy_read_disabled_bios(rdev);
+}
+
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	bool ret = false;
+	ACPI_TABLE_HEADER *hdr;
+	ACPI_SIZE tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	GOP_VBIOS_CONTENT *vbios;
+	VFCT_IMAGE_HEADER *vhdr;
+	ACPI_STATUS status;
+
+	DRM_INFO("%s: ===> Try VFCT...\n", __func__);
+
+	DRM_INFO("%s: Get \"VFCT\" ACPI table\n", __func__);
+	status = AcpiGetTable("VFCT", 1, &hdr);
+	if (!ACPI_SUCCESS(status)) {
+		DRM_INFO("%s: Failed to get \"VFCT\" table: %s\n",
+		    __func__, AcpiFormatException(status));
+		return false;
+	}
+	tbl_size = hdr->Length;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		goto out_unmap;
+	}
+
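+	/* The VFCT table wraps a GOP vbios image; verify that the image
+	 * header and its claimed length fit inside the table before use. */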
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+		goto out_unmap;
+	}
+
+	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+	vhdr = &vbios->VbiosHeader;
+	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+			vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+			vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+	if (vhdr->PCIBus != rdev->ddev->pci_bus ||
+	    vhdr->PCIDevice != rdev->ddev->pci_slot ||
+	    vhdr->PCIFunction != rdev->ddev->pci_func ||
+	    vhdr->VendorID != rdev->ddev->pci_vendor ||
+	    vhdr->DeviceID != rdev->ddev->pci_device) {
+		DRM_INFO("ACPI VFCT table is not for this card\n");
+		goto out_unmap;
+	}
+
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+		DRM_ERROR("ACPI VFCT image truncated\n");
+		goto out_unmap;
+	}
+
+	rdev->bios = malloc(vhdr->ImageLength, DRM_MEM_DRIVER, M_NOWAIT);
+	if (rdev->bios)
+		memcpy(rdev->bios, &vbios->VbiosContent, vhdr->ImageLength);
+	ret = !!rdev->bios;
+
+out_unmap:
+	return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+	bool r;
+	uint16_t tmp;
+
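+	/* Try each source in order: ATRM (ACPI, dual-GPU systems), the
+	 * ACPI VFCT table, the VRAM copy on IGPs, the PCI expansion ROM,
+	 * and finally re-enabling a disabled ROM. */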
+	r = radeon_atrm_get_bios(rdev);
+	if (r == false)
+		r = radeon_acpi_vfct_bios(rdev);
+	if (r == false)
+		r = igp_read_bios_from_vram(rdev);
+	if (r == false)
+		r = radeon_read_bios(rdev);
+	if (r == false) {
+		r = radeon_read_disabled_bios(rdev);
+	}
+	if (r == false || rdev->bios == NULL) {
+		DRM_ERROR("Unable to locate a BIOS ROM\n");
+		rdev->bios = NULL;
+		return false;
+	}
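+	/* Every valid PC option ROM begins with the 0x55 0xaa signature. */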
+	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		DRM_ERROR("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+		goto free_bios;
+	}
+
+	tmp = RBIOS16(0x18);
+	if (RBIOS8(tmp + 0x14) != 0x0) {
+		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
+		goto free_bios;
+	}
+
+	rdev->bios_header_start = RBIOS16(0x48);
+	if (!rdev->bios_header_start) {
+		goto free_bios;
+	}
+	tmp = rdev->bios_header_start + 4;
+	if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+	    !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+		rdev->is_atom_bios = true;
+	} else {
+		rdev->is_atom_bios = false;
+	}
+
+	DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+	return true;
+free_bios:
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+	return false;
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_bios.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_blit_common.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_blit_common.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_blit_common.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,48 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Alcatel-Lucent, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RADEON_BLIT_COMMON_H__
+#define __RADEON_BLIT_COMMON_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_blit_common.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define DI_PT_RECTLIST        0x11
+#define DI_INDEX_SIZE_16_BIT  0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8                 0x1
+#define FMT_5_6_5             0x8
+#define FMT_8_8_8_8           0x1a
+#define COLOR_8               0x1
+#define COLOR_5_6_5           0x8
+#define COLOR_8_8_8_8         0x1a
+
+#define RECT_UNIT_H           32
+#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
+
+#endif /* __RADEON_BLIT_COMMON_H__ */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_blit_common.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_clocks.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_clocks.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_clocks.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,918 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_clocks.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/* engine clock, in 10 kHz units */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	uint32_t fb_div, ref_div, post_div, sclk;
+
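+	/* sclk = (2 * fb_div * reference_freq) / ref_div, then reduced
+	 * according to the post divider encoded in SCLK_CNTL. */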
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= spll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	sclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+	if (post_div == 2)
+		sclk >>= 1;
+	else if (post_div == 3)
+		sclk >>= 2;
+	else if (post_div == 4)
+		sclk >>= 3;
+
+	return sclk;
+}
+
+/* memory clock, in 10 kHz units */
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint32_t fb_div, ref_div, post_div, mclk;
+
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= mpll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	mclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+	if (post_div == 2)
+		mclk >>= 1;
+	else if (post_div == 3)
+		mclk >>= 2;
+	else if (post_div == 4)
+		mclk >>= 3;
+
+	return mclk;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Read XTAL (ref clock), SCLK and MCLK from the Open Firmware device
+ * tree. Hopefully, the ATI OF driver is kind enough to fill these in.
+ */
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct device_node *dp = rdev->pdev->dev.of_node;
+	const u32 *val;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+
+	if (dp == NULL)
+		return false;
+	val = of_get_property(dp, "ATY,RefCLK", NULL);
+	if (!val || !*val) {
+		printk(KERN_WARNING "radeonfb: No ATY,RefCLK property!\n");
+		return false;
+	}
+	p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+	p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+	if (p1pll->reference_div < 2)
+		p1pll->reference_div = 12;
+	p2pll->reference_div = p1pll->reference_div;
+
+	/* These aren't in the device-tree */
+	if (rdev->family >= CHIP_R420) {
+		p1pll->pll_in_min = 100;
+		p1pll->pll_in_max = 1350;
+		p1pll->pll_out_min = 20000;
+		p1pll->pll_out_max = 50000;
+		p2pll->pll_in_min = 100;
+		p2pll->pll_in_max = 1350;
+		p2pll->pll_out_min = 20000;
+		p2pll->pll_out_max = 50000;
+	} else {
+		p1pll->pll_in_min = 40;
+		p1pll->pll_in_max = 500;
+		p1pll->pll_out_min = 12500;
+		p1pll->pll_out_max = 35000;
+		p2pll->pll_in_min = 40;
+		p2pll->pll_in_max = 500;
+		p2pll->pll_out_min = 12500;
+		p2pll->pll_out_max = 35000;
+	}
+	/* not sure what the max should be in all cases */
+	rdev->clock.max_pixel_clock = 35000;
+
+	spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+	spll->reference_div = mpll->reference_div =
+		RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+
+	val = of_get_property(dp, "ATY,SCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_sclk = (*val) / 10;
+	else
+		rdev->clock.default_sclk =
+			radeon_legacy_get_engine_clock(rdev);
+
+	val = of_get_property(dp, "ATY,MCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_mclk = (*val) / 10;
+	else
+		rdev->clock.default_mclk =
+			radeon_legacy_get_memory_clock(rdev);
+
+	DRM_INFO("Using device-tree clock info\n");
+
+	return true;
+}
+#else
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_OF */
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int ret;
+
+	if (rdev->is_atom_bios)
+		ret = radeon_atom_get_clock_info(dev);
+	else
+		ret = radeon_combios_get_clock_info(dev);
+	if (!ret)
+		ret = radeon_read_clocks_OF(dev);
+
+	if (ret) {
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
+		if (p2pll->reference_div < 2)
+			p2pll->reference_div = 12;
+		if (rdev->family < CHIP_RS600) {
+			if (spll->reference_div < 2)
+				spll->reference_div =
+					RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+					RADEON_M_SPLL_REF_DIV_MASK;
+		}
+		if (mpll->reference_div < 2)
+			mpll->reference_div = spll->reference_div;
+	} else {
+		if (ASIC_IS_AVIVO(rdev)) {
+			/* TODO FALLBACK */
+		} else {
+			DRM_INFO("Using generic clock info\n");
+
+			/* may need to be per card */
+			rdev->clock.max_pixel_clock = 35000;
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				p1pll->reference_freq = 1432;
+				p2pll->reference_freq = 1432;
+				spll->reference_freq = 1432;
+				mpll->reference_freq = 1432;
+			} else {
+				p1pll->reference_freq = 2700;
+				p2pll->reference_freq = 2700;
+				spll->reference_freq = 2700;
+				mpll->reference_freq = 2700;
+			}
+			p1pll->reference_div =
+			    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+			if (p1pll->reference_div < 2)
+				p1pll->reference_div = 12;
+			p2pll->reference_div = p1pll->reference_div;
+
+			if (rdev->family >= CHIP_R420) {
+				p1pll->pll_in_min = 100;
+				p1pll->pll_in_max = 1350;
+				p1pll->pll_out_min = 20000;
+				p1pll->pll_out_max = 50000;
+				p2pll->pll_in_min = 100;
+				p2pll->pll_in_max = 1350;
+				p2pll->pll_out_min = 20000;
+				p2pll->pll_out_max = 50000;
+			} else {
+				p1pll->pll_in_min = 40;
+				p1pll->pll_in_max = 500;
+				p1pll->pll_out_min = 12500;
+				p1pll->pll_out_max = 35000;
+				p2pll->pll_in_min = 40;
+				p2pll->pll_in_max = 500;
+				p2pll->pll_out_min = 12500;
+				p2pll->pll_out_max = 35000;
+			}
+
+			spll->reference_div =
+			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+			mpll->reference_div = spll->reference_div;
+			rdev->clock.default_sclk =
+			    radeon_legacy_get_engine_clock(rdev);
+			rdev->clock.default_mclk =
+			    radeon_legacy_get_memory_clock(rdev);
+		}
+	}
+
+	/* pixel clocks */
+	if (ASIC_IS_AVIVO(rdev)) {
+		p1pll->min_post_div = 2;
+		p1pll->max_post_div = 0x7f;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 9;
+		p2pll->min_post_div = 2;
+		p2pll->max_post_div = 0x7f;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 9;
+	} else {
+		p1pll->min_post_div = 1;
+		p1pll->max_post_div = 16;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 0;
+		p2pll->min_post_div = 1;
+		p2pll->max_post_div = 12;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 0;
+	}
+
+	/* dcpll is DCE4 only */
+	dcpll->min_post_div = 2;
+	dcpll->max_post_div = 0x7f;
+	dcpll->min_frac_feedback_div = 0;
+	dcpll->max_frac_feedback_div = 9;
+	dcpll->min_ref_div = 2;
+	dcpll->max_ref_div = 0x3ff;
+	dcpll->min_feedback_div = 4;
+	dcpll->max_feedback_div = 0xfff;
+	dcpll->best_vco = 0;
+
+	p1pll->min_ref_div = 2;
+	p1pll->max_ref_div = 0x3ff;
+	p1pll->min_feedback_div = 4;
+	p1pll->max_feedback_div = 0x7ff;
+	p1pll->best_vco = 0;
+
+	p2pll->min_ref_div = 2;
+	p2pll->max_ref_div = 0x3ff;
+	p2pll->min_feedback_div = 4;
+	p2pll->max_feedback_div = 0x7ff;
+	p2pll->best_vco = 0;
+
+	/* system clock */
+	spll->min_post_div = 1;
+	spll->max_post_div = 1;
+	spll->min_ref_div = 2;
+	spll->max_ref_div = 0xff;
+	spll->min_feedback_div = 4;
+	spll->max_feedback_div = 0xff;
+	spll->best_vco = 0;
+
+	/* memory clock */
+	mpll->min_post_div = 1;
+	mpll->max_post_div = 1;
+	mpll->min_ref_div = 2;
+	mpll->max_ref_div = 0xff;
+	mpll->min_feedback_div = 4;
+	mpll->max_feedback_div = 0xff;
+	mpll->best_vco = 0;
+
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
+}
+
+/* req_clock and the return value are in 10 kHz units */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+				   uint32_t req_clock,
+				   int *fb_div, int *post_div)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	int ref_div = spll->reference_div;
+
+	if (!ref_div)
+		ref_div =
+		    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+		    RADEON_M_SPLL_REF_DIV_MASK;
+
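+	/* Pick a post divider that lifts the request into the PLL's range,
+	 * then compute the feedback divider with rounding:
+	 *   fb_div = (req_clock * ref_div + ref_freq) / (2 * ref_freq)
+	 * and return the clock those dividers actually produce. */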
+	if (req_clock < 15000) {
+		*post_div = 8;
+		req_clock *= 8;
+	} else if (req_clock < 30000) {
+		*post_div = 4;
+		req_clock *= 4;
+	} else if (req_clock < 60000) {
+		*post_div = 2;
+		req_clock *= 2;
+	} else
+		*post_div = 1;
+
+	req_clock *= ref_div;
+	req_clock += spll->reference_freq;
+	req_clock /= (2 * spll->reference_freq);
+
+	*fb_div = req_clock & 0xff;
+
+	req_clock = (req_clock & 0xffff) << 1;
+	req_clock *= spll->reference_freq;
+	req_clock /= ref_div;
+	req_clock /= *post_div;
+
+	return req_clock;
+}
+
+/* eng_clock is in 10 kHz units */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+				    uint32_t eng_clock)
+{
+	uint32_t tmp;
+	int fb_div, post_div;
+
+	/* XXX: wait for idle */
+
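+	/* Fall back to XTALIN, put the SPLL to sleep and reset it, program
+	 * the new feedback divider, wake the PLL back up, then select the
+	 * post divider as the SCLK source. */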
+	eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp &= ~RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(10);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+	tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+	WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+	/* XXX: verify on different asics */
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_PVG_MASK;
+	if ((eng_clock * post_div) >= 90000)
+		tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+	else
+		tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	switch (post_div) {
+	case 1:
+	default:
+		tmp |= 1;
+		break;
+	case 2:
+		tmp |= 2;
+		break;
+	case 4:
+		tmp |= 3;
+		break;
+	case 8:
+		tmp |= 4;
+		break;
+	}
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(20);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp |= RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	uint32_t tmp;
+
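+	/* enable != 0: let idle blocks gate their clocks dynamically.
+	 * enable == 0: force every engine clock on unconditionally. */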
+	if (enable) {
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			if ((RREG32(RADEON_CONFIG_CNTL) &
+			     RADEON_CFG_ATI_REV_ID_MASK) >
+			    RADEON_CFG_ATI_REV_A13) {
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_RB);
+			}
+			tmp &=
+			    ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+			      RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+			      RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+			      RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+			      RADEON_SCLK_FORCE_TDM);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if (ASIC_IS_R300(rdev)) {
+			if ((rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480)) {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				tmp |=
+				    RADEON_SCLK_FORCE_TOP |
+				    RADEON_SCLK_FORCE_VIP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			} else if (rdev->family >= CHIP_RV350) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+					R300_SCLK_GA_MAX_DYN_STOP_LAT |
+					R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_MISC);
+				tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+					RADEON_IO_MCLK_DYN_ENABLE);
+				WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp |= (RADEON_FORCEON_MCLKA |
+					RADEON_FORCEON_MCLKB);
+
+				tmp &= ~(RADEON_FORCEON_YCLKA |
+					 RADEON_FORCEON_YCLKB |
+					 RADEON_FORCEON_MC);
+
+				/* Some vbios releases set the DISABLE_MC_MCLKA
+				   and DISABLE_MC_MCLKB bits in the vbios table.
+				   Leaving them set causes a hardware hang when
+				   reading video memory with dynamic clocking
+				   enabled. */
+				if ((tmp & R300_DISABLE_MC_MCLKA) &&
+				    (tmp & R300_DISABLE_MC_MCLKB)) {
+					/* If both bits are set, then check the active channels */
+					tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+					if (rdev->mc.vram_width == 64) {
+						if (RREG32(RADEON_MEM_CNTL) &
+						    R300_MEM_USE_CD_CH_ONLY)
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKB;
+						else
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKA;
+					} else {
+						tmp &= ~(R300_DISABLE_MC_MCLKA |
+							 R300_DISABLE_MC_MCLKB);
+					}
+				}
+
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+			} else {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &= ~(R300_SCLK_FORCE_VAP);
+				tmp |= RADEON_SCLK_FORCE_CP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+				mdelay(15);
+
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+			}
+		} else {
+			tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+			tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+				 RADEON_DISP_DYN_STOP_LAT_MASK |
+				 RADEON_DYN_STOP_MODE_MASK);
+
+			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+			tmp |= RADEON_SCLK_DYN_START_CNTL;
+			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+			mdelay(15);
+
+			/* When DRI is enabled, setting DYN_STOP_LAT to zero can
+			   cause some R200s to lock up randomly; leave it as set
+			   by the BIOS. */
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			/*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+			tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+			/*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+			if (((rdev->family == CHIP_RV250) &&
+			     ((RREG32(RADEON_CONFIG_CNTL) &
+			       RADEON_CFG_ATI_REV_ID_MASK) <
+			      RADEON_CFG_ATI_REV_A13))
+			    || ((rdev->family == CHIP_RV100)
+				&&
+				((RREG32(RADEON_CONFIG_CNTL) &
+				  RADEON_CFG_ATI_REV_ID_MASK) <=
+				 RADEON_CFG_ATI_REV_A13))) {
+				tmp |= RADEON_SCLK_FORCE_CP;
+				tmp |= RADEON_SCLK_FORCE_VIP;
+			}
+
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+				/* RV200::A11 A12 RV250::A11 A12 */
+				if (((rdev->family == CHIP_RV200) ||
+				     (rdev->family == CHIP_RV250)) &&
+				    ((RREG32(RADEON_CONFIG_CNTL) &
+				      RADEON_CFG_ATI_REV_ID_MASK) <
+				     RADEON_CFG_ATI_REV_A13)) {
+					tmp |= RADEON_SCLK_MORE_FORCEON;
+				}
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(15);
+			}
+
+			/* RV200::A11 A12, RV250::A11 A12 */
+			if (((rdev->family == CHIP_RV200) ||
+			     (rdev->family == CHIP_RV250)) &&
+			    ((RREG32(RADEON_CONFIG_CNTL) &
+			      RADEON_CFG_ATI_REV_ID_MASK) <
+			     RADEON_CFG_ATI_REV_A13)) {
+				tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+				tmp |= RADEON_TCL_BYPASS_DISABLE;
+				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+			}
+			mdelay(15);
+
+			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+				RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				RADEON_PIXCLK_GV_ALWAYS_ONb |
+				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+				RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+			mdelay(15);
+		}
+	} else {
+		/* Turn everything OFF (ForceON to everything) */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+				RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+				| RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+				RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+				RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+				RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+				RADEON_SCLK_FORCE_RB);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if ((rdev->family == CHIP_RS400) ||
+			   (rdev->family == CHIP_RS480)) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else if (rdev->family >= CHIP_RV350) {
+			/* for RV350/M10, no delays are required. */
+			tmp = RREG32_PLL(R300_SCLK_CNTL2);
+			tmp |= (R300_SCLK_FORCE_TCL |
+				R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+			WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+			tmp |= (RADEON_FORCEON_MCLKA |
+				RADEON_FORCEON_MCLKB |
+				RADEON_FORCEON_YCLKA |
+				RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+			WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+			tmp |= RADEON_SCLK_FORCE_SE;
+
+			if (rdev->flags & RADEON_SINGLE_CRTC) {
+				tmp |= (RADEON_SCLK_FORCE_RB |
+					RADEON_SCLK_FORCE_TDM |
+					RADEON_SCLK_FORCE_TAM |
+					RADEON_SCLK_FORCE_PB |
+					RADEON_SCLK_FORCE_RE |
+					RADEON_SCLK_FORCE_VIP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_HDP);
+			} else if ((rdev->family == CHIP_R300) ||
+				   (rdev->family == CHIP_R350)) {
+				tmp |= (RADEON_SCLK_FORCE_HDP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_VIP);
+			}
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			mdelay(16);
+
+			if ((rdev->family == CHIP_R300) ||
+			    (rdev->family == CHIP_R350)) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp |= (R300_SCLK_FORCE_TCL |
+					R300_SCLK_FORCE_GA |
+					R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+				mdelay(16);
+			}
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp &= ~(RADEON_FORCEON_MCLKA |
+					 RADEON_FORCEON_YCLKA);
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+				mdelay(16);
+			}
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp |= RADEON_SCLK_MORE_FORCEON;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(16);
+			}
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(16);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+		}
+	}
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_clocks.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_combios.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_combios.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_combios.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3694 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_combios.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_PMAC */
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+#endif
+
+/* old legacy ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+	/* absolute offset tables */
+	COMBIOS_ASIC_INIT_1_TABLE,
+	COMBIOS_BIOS_SUPPORT_TABLE,
+	COMBIOS_DAC_PROGRAMMING_TABLE,
+	COMBIOS_MAX_COLOR_DEPTH_TABLE,
+	COMBIOS_CRTC_INFO_TABLE,
+	COMBIOS_PLL_INFO_TABLE,
+	COMBIOS_TV_INFO_TABLE,
+	COMBIOS_DFP_INFO_TABLE,
+	COMBIOS_HW_CONFIG_INFO_TABLE,
+	COMBIOS_MULTIMEDIA_INFO_TABLE,
+	COMBIOS_TV_STD_PATCH_TABLE,
+	COMBIOS_LCD_INFO_TABLE,
+	COMBIOS_MOBILE_INFO_TABLE,
+	COMBIOS_PLL_INIT_TABLE,
+	COMBIOS_MEM_CONFIG_TABLE,
+	COMBIOS_SAVE_MASK_TABLE,
+	COMBIOS_HARDCODED_EDID_TABLE,
+	COMBIOS_ASIC_INIT_2_TABLE,
+	COMBIOS_CONNECTOR_INFO_TABLE,
+	COMBIOS_DYN_CLK_1_TABLE,
+	COMBIOS_RESERVED_MEM_TABLE,
+	COMBIOS_EXT_TMDS_INFO_TABLE,
+	COMBIOS_MEM_CLK_INFO_TABLE,
+	COMBIOS_EXT_DAC_INFO_TABLE,
+	COMBIOS_MISC_INFO_TABLE,
+	COMBIOS_CRT_INFO_TABLE,
+	COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+	COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+	COMBIOS_FAN_SPEED_INFO_TABLE,
+	COMBIOS_OVERDRIVE_INFO_TABLE,
+	COMBIOS_OEM_INFO_TABLE,
+	COMBIOS_DYN_CLK_2_TABLE,
+	COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+	COMBIOS_I2C_INFO_TABLE,
+	/* relative offset tables */
+	COMBIOS_ASIC_INIT_3_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_4_TABLE,	/* offset from misc info */
+	COMBIOS_DETECTED_MEM_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_5_TABLE,	/* offset from misc info */
+	COMBIOS_RAM_RESET_TABLE,	/* offset from mem config */
+	COMBIOS_POWERPLAY_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_GPIO_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_LCD_DDC_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_ON_TABLE,	/* offset from tmds power */
+	COMBIOS_TMDS_POWER_OFF_TABLE,	/* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+	DDC_NONE_DETECTED,
+	DDC_MONID,
+	DDC_DVI,
+	DDC_VGA,
+	DDC_CRT2,
+	DDC_LCD,
+	DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+	CONNECTOR_NONE_LEGACY,
+	CONNECTOR_PROPRIETARY_LEGACY,
+	CONNECTOR_CRT_LEGACY,
+	CONNECTOR_DVI_I_LEGACY,
+	CONNECTOR_DVI_D_LEGACY,
+	CONNECTOR_CTV_LEGACY,
+	CONNECTOR_STV_LEGACY,
+	CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+const int legacy_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+					 enum radeon_combios_table_offset table)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int rev;
+	uint16_t offset = 0, check_offset;
+
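+	/* Each absolute table is located by a 16-bit pointer at a fixed
+	 * offset from the COMBIOS header; relative tables are reached via
+	 * a pointer stored inside another table. */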
+	if (!rdev->bios)
+		return 0;
+
+	switch (table) {
+		/* absolute offset tables */
+	case COMBIOS_ASIC_INIT_1_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_BIOS_SUPPORT_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_DAC_PROGRAMMING_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_CRTC_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_PLL_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_TV_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_DFP_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_HW_CONFIG_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MULTIMEDIA_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_TV_STD_PATCH_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_LCD_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MOBILE_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_PLL_INIT_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MEM_CONFIG_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_SAVE_MASK_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_HARDCODED_EDID_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_ASIC_INIT_2_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_CONNECTOR_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_DYN_CLK_1_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_RESERVED_MEM_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_EXT_TMDS_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MEM_CLK_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_EXT_DAC_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_MISC_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_CRT_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_FAN_SPEED_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_OVERDRIVE_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_OEM_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_DYN_CLK_2_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+		if (check_offset)
+			offset = check_offset;
+		break;
+	case COMBIOS_I2C_INFO_TABLE:
+		check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+		if (check_offset)
+			offset = check_offset;
+		break;
+		/* relative offset tables */
+	case COMBIOS_ASIC_INIT_3_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x3);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_4_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x5);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_DETECTED_MEM_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x7);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_5_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev == 2) {
+				check_offset = RBIOS16(check_offset + 0x9);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_RAM_RESET_TABLE:	/* offset from mem config */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (check_offset) {
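+			/* the reset data sits past a zero-terminated byte run
+			 * at the head of the mem config table, plus two more
+			 * bytes, hence the skip below */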
+			while (RBIOS8(check_offset++));
+			check_offset += 2;
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_POWERPLAY_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x11);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_GPIO_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x13);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_LCD_DDC_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x15);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x17);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_ON_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x2);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_OFF_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x4);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return offset;
+}
+
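+/*
+ * Illustrative sketch (not compiled in): every absolute-offset case above
+ * reads one 16-bit pointer at a fixed slot past the COMBIOS header, so the
+ * switch could be driven by a slot table instead.  The helper below and its
+ * names are hypothetical; the slot values are the ones the switch already
+ * uses.
+ */
+#if 0
+static const uint16_t combios_abs_slot[] = {
+	[COMBIOS_ASIC_INIT_1_TABLE]  = 0x0c,
+	[COMBIOS_BIOS_SUPPORT_TABLE] = 0x14,
+	[COMBIOS_PLL_INFO_TABLE]     = 0x30,
+	/* ... one entry per absolute table ... */
+};
+
+static uint16_t combios_abs_offset(struct radeon_device *rdev,
+				   enum radeon_combios_table_offset table)
+{
+	return RBIOS16(rdev->bios_header_start + combios_abs_slot[table]);
+}
+#endif
+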
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+	int edid_info, size;
+	struct edid *edid;
+	unsigned char *raw;
+
+	edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+	if (!edid_info)
+		return false;
+
+	raw = rdev->bios + edid_info;
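+	/* byte 0x7e of the base EDID block holds its extension count, so the
+	 * full image is one EDID_LENGTH block per extension plus the base */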
+	size = EDID_LENGTH * (raw[0x7e] + 1);
+	edid = malloc(size, DRM_MEM_KMS, M_NOWAIT);
+	if (edid == NULL)
+		return false;
+
+	memcpy((unsigned char *)edid, raw, size);
+
+	if (!drm_edid_is_valid(edid)) {
+		free(edid, DRM_MEM_KMS);
+		return false;
+	}
+
+	rdev->mode_info.bios_hardcoded_edid = edid;
+	rdev->mode_info.bios_hardcoded_edid_size = size;
+	return true;
+}
+
+/* this is used for atom LCDs as well */
+struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+	struct edid *edid;
+
+	if (rdev->mode_info.bios_hardcoded_edid) {
+		edid = malloc(rdev->mode_info.bios_hardcoded_edid_size,
+		    DRM_MEM_KMS, M_NOWAIT);
+		if (edid) {
+			memcpy((unsigned char *)edid,
+			       (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+			       rdev->mode_info.bios_hardcoded_edid_size);
+			return edid;
+		}
+	}
+	return NULL;
+}
+
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+						       enum radeon_combios_ddc ddc,
+						       u32 clk_mask,
+						       u32 data_mask)
+{
+	struct radeon_i2c_bus_rec i2c;
+	int ddc_line = 0;
+
+	/* ddc id            = mask reg
+	 * DDC_NONE_DETECTED = none
+	 * DDC_DVI           = RADEON_GPIO_DVI_DDC
+	 * DDC_VGA           = RADEON_GPIO_VGA_DDC
+	 * DDC_LCD           = RADEON_GPIOPAD_MASK
+	 * DDC_GPIO          = RADEON_MDGPIO_MASK
+	 * r1xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_CRT2_DDC
+	 * r200
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * r300/r350
+	 * DDC_MONID         = RADEON_GPIO_DVI_DDC
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * rv2xx/rv3xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 * rs3xx/rs4xx
+	 * DDC_MONID         = RADEON_GPIOPAD_MASK
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 */
+	switch (ddc) {
+	case DDC_NONE_DETECTED:
+	default:
+		ddc_line = 0;
+		break;
+	case DDC_DVI:
+		ddc_line = RADEON_GPIO_DVI_DDC;
+		break;
+	case DDC_VGA:
+		ddc_line = RADEON_GPIO_VGA_DDC;
+		break;
+	case DDC_LCD:
+		ddc_line = RADEON_GPIOPAD_MASK;
+		break;
+	case DDC_GPIO:
+		ddc_line = RADEON_MDGPIO_MASK;
+		break;
+	case DDC_MONID:
+		if (rdev->family == CHIP_RS300 ||
+		    rdev->family == CHIP_RS400 ||
+		    rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIOPAD_MASK;
+		else if (rdev->family == CHIP_R300 ||
+			 rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else
+			ddc_line = RADEON_GPIO_MONID;
+		break;
+	case DDC_CRT2:
+		if (rdev->family == CHIP_R200 ||
+		    rdev->family == CHIP_R300 ||
+		    rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else if (rdev->family == CHIP_RS300 ||
+			   rdev->family == CHIP_RS400 ||
+			   rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIO_MONID;
+		else if (rdev->family >= CHIP_RV350) {
+			ddc_line = RADEON_GPIO_MONID;
+			ddc = DDC_MONID;
+		} else
+			ddc_line = RADEON_GPIO_CRT2_DDC;
+		break;
+	}
+
+	if (ddc_line == RADEON_GPIOPAD_MASK) {
+		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+		i2c.a_clk_reg = RADEON_GPIOPAD_A;
+		i2c.a_data_reg = RADEON_GPIOPAD_A;
+		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+		i2c.en_data_reg = RADEON_GPIOPAD_EN;
+		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+		i2c.y_data_reg = RADEON_GPIOPAD_Y;
+	} else if (ddc_line == RADEON_MDGPIO_MASK) {
+		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+		i2c.a_clk_reg = RADEON_MDGPIO_A;
+		i2c.a_data_reg = RADEON_MDGPIO_A;
+		i2c.en_clk_reg = RADEON_MDGPIO_EN;
+		i2c.en_data_reg = RADEON_MDGPIO_EN;
+		i2c.y_clk_reg = RADEON_MDGPIO_Y;
+		i2c.y_data_reg = RADEON_MDGPIO_Y;
+	} else {
+		i2c.mask_clk_reg = ddc_line;
+		i2c.mask_data_reg = ddc_line;
+		i2c.a_clk_reg = ddc_line;
+		i2c.a_data_reg = ddc_line;
+		i2c.en_clk_reg = ddc_line;
+		i2c.en_data_reg = ddc_line;
+		i2c.y_clk_reg = ddc_line;
+		i2c.y_data_reg = ddc_line;
+	}
+
+	if (clk_mask && data_mask) {
+		/* system specific masks */
+		i2c.mask_clk_mask = clk_mask;
+		i2c.mask_data_mask = data_mask;
+		i2c.a_clk_mask = clk_mask;
+		i2c.a_data_mask = data_mask;
+		i2c.en_clk_mask = clk_mask;
+		i2c.en_data_mask = data_mask;
+		i2c.y_clk_mask = clk_mask;
+		i2c.y_data_mask = data_mask;
+	} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+		   (ddc_line == RADEON_MDGPIO_MASK)) {
+		/* default gpiopad masks */
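+		/* 0x20 << 8 selects pad bit 13 for the clock line and 0x80
+		 * bit 7 for the data line; assumed-safe defaults when the
+		 * BIOS supplies no system-specific mask */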
+		i2c.mask_clk_mask = (0x20 << 8);
+		i2c.mask_data_mask = 0x80;
+		i2c.a_clk_mask = (0x20 << 8);
+		i2c.a_data_mask = 0x80;
+		i2c.en_clk_mask = (0x20 << 8);
+		i2c.en_data_mask = 0x80;
+		i2c.y_clk_mask = (0x20 << 8);
+		i2c.y_data_mask = 0x80;
+	} else {
+		/* default masks for ddc pads */
+		i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+		i2c.mask_data_mask = RADEON_GPIO_MASK_0;
+		i2c.a_clk_mask = RADEON_GPIO_A_1;
+		i2c.a_data_mask = RADEON_GPIO_A_0;
+		i2c.en_clk_mask = RADEON_GPIO_EN_1;
+		i2c.en_data_mask = RADEON_GPIO_EN_0;
+		i2c.y_clk_mask = RADEON_GPIO_Y_1;
+		i2c.y_data_mask = RADEON_GPIO_Y_0;
+	}
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_RS300:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R200:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_MONID:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV250:
+	case CHIP_RV280:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_CRT2_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		case RADEON_GPIO_MONID:
+			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+			 * reliably on some pre-r4xx hardware; not sure why.
+			 */
+			i2c.hw_capable = false;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	default:
+		i2c.hw_capable = false;
+		break;
+	}
+	i2c.mm_i2c = false;
+
+	i2c.i2c_id = ddc;
+	i2c.hpd = RADEON_HPD_NONE;
+
+	i2c.valid = (ddc_line != 0);
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+	u16 offset;
+	u8 id, blocks, clk, data;
+	int i;
+
+	i2c.valid = false;
+
+	offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+	if (offset) {
+		blocks = RBIOS8(offset + 2);
+		for (i = 0; i < blocks; i++) {
+			id = RBIOS8(offset + 3 + (i * 5) + 0);
+			if (id == 136) {
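+				/* id 136 (0x88) marks the gpiopad bus entry */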
+				clk = RBIOS8(offset + 3 + (i * 5) + 3);
+				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
+				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+							    (1 << clk), (1 << data));
+				break;
+			}
+		}
+	}
+	return i2c;
+}
+
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+
+	/* actual hw pads
+	 * r1xx/rs2xx/rs3xx
+	 * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
+	 * r200
+	 * 0x60, 0x64, 0x68, mm
+	 * r300/r350
+	 * 0x60, 0x64, mm
+	 * rv2xx/rv3xx/rs4xx
+	 * 0x60, 0x64, 0x68, gpiopads, mm
+	 */
+
+	/* 0x60 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+	rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+	/* 0x64 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+	rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+	/* mm i2c */
+	i2c.valid = true;
+	i2c.hw_capable = true;
+	i2c.mm_i2c = true;
+	i2c.i2c_id = 0xa0;
+	rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350) {
+		/* only 2 sw i2c pads */
+	} else if (rdev->family == CHIP_RS300 ||
+		   rdev->family == CHIP_RS400 ||
+		   rdev->family == CHIP_RS480) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+		/* gpiopad */
+		i2c = radeon_combios_get_i2c_info_from_table(rdev);
+		if (i2c.valid)
+			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family >= CHIP_R300)) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+	} else {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+		/* 0x6c */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+	}
+}
+
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t pll_info;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int8_t rev;
+	uint16_t sclk, mclk;
+
+	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+	if (pll_info) {
+		rev = RBIOS8(pll_info);
+
+		/* pixel clocks */
+		p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+		p1pll->reference_div = RBIOS16(pll_info + 0x10);
+		p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+		p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+		p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+		p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+
+		if (rev > 9) {
+			p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+			p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+		} else {
+			p1pll->pll_in_min = 40;
+			p1pll->pll_in_max = 500;
+		}
+		*p2pll = *p1pll;
+
+		/* system clock */
+		spll->reference_freq = RBIOS16(pll_info + 0x1a);
+		spll->reference_div = RBIOS16(pll_info + 0x1c);
+		spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+		spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+		if (rev > 10) {
+			spll->pll_in_min = RBIOS32(pll_info + 0x48);
+			spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+		} else {
+			/* ??? */
+			spll->pll_in_min = 40;
+			spll->pll_in_max = 500;
+		}
+
+		/* memory clock */
+		mpll->reference_freq = RBIOS16(pll_info + 0x26);
+		mpll->reference_div = RBIOS16(pll_info + 0x28);
+		mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+		mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+		if (rev > 10) {
+			mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+			mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+		} else {
+			/* ??? */
+			mpll->pll_in_min = 40;
+			mpll->pll_in_max = 500;
+		}
+
+		/* default sclk/mclk */
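+		/* clocks are stored in 10 kHz units, so the 200 * 100
+		 * fallback below means 200 MHz */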
+		sclk = RBIOS16(pll_info + 0xa);
+		mclk = RBIOS16(pll_info + 0x8);
+		if (sclk == 0)
+			sclk = 200 * 100;
+		if (mclk == 0)
+			mclk = 200 * 100;
+
+		rdev->clock.default_sclk = sclk;
+		rdev->clock.default_mclk = mclk;
+
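+		/* offset 0x16 is the same dword read above as pll_out_max;
+		 * reuse it as the pixel clock ceiling when non-zero */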
+		if (RBIOS32(pll_info + 0x16))
+			rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+		else
+			rdev->clock.max_pixel_clock = 35000; /* might need something ASIC-specific */
+
+		return true;
+	}
+	return false;
+}
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 igp_info;
+
+	/* sideport is AMD only; RS400 is the Intel-based IGP variant and
+	 * never has one */
+	if (rdev->family == CHIP_RS400)
+		return false;
+
+	igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+	if (igp_info) {
+		if (RBIOS16(igp_info + 0x4))
+			return true;
+	}
+	return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+	0x00000808,		/* r100  */
+	0x00000808,		/* rv100 */
+	0x00000808,		/* rs100 */
+	0x00000808,		/* rv200 */
+	0x00000808,		/* rs200 */
+	0x00000808,		/* r200  */
+	0x00000808,		/* rv250 */
+	0x00000000,		/* rs300 */
+	0x00000808,		/* rv280 */
+	0x00000808,		/* r300  */
+	0x00000808,		/* r350  */
+	0x00000808,		/* rv350 */
+	0x00000808,		/* rv380 */
+	0x00000808,		/* r420  */
+	0x00000808,		/* r423  */
+	0x00000808,		/* rv410 */
+	0x00000000,		/* rs400 */
+	0x00000000,		/* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+							  struct radeon_encoder_primary_dac *p_dac)
+{
+	p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+}
+
+struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+	int found = 0;
+
+	p_dac = malloc(sizeof(struct radeon_encoder_primary_dac),
+			DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!p_dac)
+		return NULL;
+
+	/* check CRT table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+	if (dac_info) {
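+		/* the adjust word packs the two 4-bit trim fields from the
+		 * table as (bg << 8) | dac */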
+		rev = RBIOS8(dac_info) & 0x3;
+		if (rev < 2) {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		} else {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = RBIOS8(dac_info + 0x3) & 0xf;
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		}
+		/* only trust non-zero values; if all zero, fall back to the
+		 * per-family defaults below */
+		if (p_dac->ps2_pdac_adj)
+			found = 1;
+	}
+
+	/* quirks */
+	/* Radeon 9100 (R200) */
+	if ((dev->pci_device == 0x514D) &&
+	    (dev->pci_subvendor == 0x174B) &&
+	    (dev->pci_subdevice == 0x7149)) {
+		/* vbios value is bad, use the default */
+		found = 0;
+	}
+
+	if (!found) /* fall back to defaults */
+		radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+	return p_dac;
+}
+
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	uint16_t tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (tv_info) {
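+		/* byte 6 appears to be a signature byte; only parse tables
+		 * that read back 'T' there */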
+		if (RBIOS8(tv_info + 6) == 'T') {
+			switch (RBIOS8(tv_info + 7) & 0xf) {
+			case 1:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+				break;
+			case 2:
+				tv_std = TV_STD_PAL;
+				DRM_DEBUG_KMS("Default TV standard: PAL\n");
+				break;
+			case 3:
+				tv_std = TV_STD_PAL_M;
+				DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+				break;
+			case 4:
+				tv_std = TV_STD_PAL_60;
+				DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+				break;
+			case 5:
+				tv_std = TV_STD_NTSC_J;
+				DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+				break;
+			case 6:
+				tv_std = TV_STD_SCART_PAL;
+				DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
+				break;
+			default:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+				break;
+			}
+
+			switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+			case 0:
+				DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
+				break;
+			case 1:
+				DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
+				break;
+			case 2:
+				DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
+				break;
+			case 3:
+				DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return tv_std;
+}
+
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+	0x00000000,		/* r100  */
+	0x00280000,		/* rv100 */
+	0x00000000,		/* rs100 */
+	0x00880000,		/* rv200 */
+	0x00000000,		/* rs200 */
+	0x00000000,		/* r200  */
+	0x00770000,		/* rv250 */
+	0x00290000,		/* rs300 */
+	0x00560000,		/* rv280 */
+	0x00780000,		/* r300  */
+	0x00770000,		/* r350  */
+	0x00780000,		/* rv350 */
+	0x00780000,		/* rv380 */
+	0x01080000,		/* r420  */
+	0x01080000,		/* r423  */
+	0x01080000,		/* rv410 */
+	0x00780000,		/* rs400 */
+	0x00780000,		/* rs480 */
+};
+
+static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
+						     struct radeon_encoder_tv_dac *tv_dac)
+{
+	tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+	if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+		tv_dac->ps2_tvdac_adj = 0x00880000;
+	tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+	tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+}
+
+struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+	int found = 0;
+
+	tv_dac = malloc(sizeof(struct radeon_encoder_tv_dac),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!tv_dac)
+		return NULL;
+
+	/* first check TV table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (dac_info) {
+		rev = RBIOS8(dac_info + 0x3);
+		if (rev > 4) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = RBIOS8(dac_info + 0xd) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = RBIOS8(dac_info + 0xf) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0x10) & 0xf;
+			dac = RBIOS8(dac_info + 0x11) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* only trust non-zero values; if all zero, fall back
+			 * to the per-family defaults */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		} else if (rev > 1) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xd) & 0xf;
+			dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* only trust non-zero values; if all zero, fall back
+			 * to the per-family defaults */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		}
+		tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+	}
+	if (!found) {
+		/* then check CRT table */
+		dac_info =
+		    combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+		if (dac_info) {
+			rev = RBIOS8(dac_info) & 0x3;
+			if (rev < 2) {
+				bg = RBIOS8(dac_info + 0x3) & 0xf;
+				dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* only trust non-zero values; if all zero,
+				 * fall back to the per-family defaults */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			} else {
+				bg = RBIOS8(dac_info + 0x4) & 0xf;
+				dac = RBIOS8(dac_info + 0x5) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* only trust non-zero values; if all zero,
+				 * fall back to the per-family defaults */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			}
+		} else {
+			DRM_INFO("No TV DAC info found in BIOS\n");
+		}
+	}
+
+	if (!found) /* fall back to defaults */
+		radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
+
+	return tv_dac;
+}
+
+static struct radeon_encoder_lvds *
+radeon_legacy_get_lvds_info_from_regs(struct radeon_device *rdev)
+{
+	struct radeon_encoder_lvds *lvds = NULL;
+	uint32_t fp_vert_stretch, fp_horz_stretch;
+	uint32_t ppll_div_sel, ppll_val;
+	uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+
+	lvds = malloc(sizeof(struct radeon_encoder_lvds),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!lvds)
+		return NULL;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+	/* These should be fail-safe defaults, fingers crossed */
+	lvds->panel_pwr_delay = 200;
+	lvds->panel_vcc_delay = 2000;
+
+	lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
+	lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
+
+	if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+		lvds->native_mode.vdisplay =
+		    ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+		     RADEON_VERT_PANEL_SHIFT) + 1;
+	else
+		lvds->native_mode.vdisplay =
+		    (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+	if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+		lvds->native_mode.hdisplay =
+		    (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+		      RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+	else
+		lvds->native_mode.hdisplay =
+		    ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+	if ((lvds->native_mode.hdisplay < 640) ||
+	    (lvds->native_mode.vdisplay < 480)) {
+		lvds->native_mode.hdisplay = 640;
+		lvds->native_mode.vdisplay = 480;
+	}
+
+	ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+	ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
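+	/* 0x1bb appears to be the PLL's power-on default (an assumption --
+	 * the value is undocumented here); if it is still programmed, the
+	 * BIOS never set up the dividers, so they cannot be trusted */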
+	if ((ppll_val & 0x000707ff) == 0x1bb) {
+		lvds->use_bios_dividers = false;
+	} else {
+		lvds->panel_ref_divider =
+		    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+		lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+		lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+	}
+	lvds->panel_vcc_delay = 200;
+
+	DRM_INFO("Panel info derived from registers\n");
+	DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+		 lvds->native_mode.vdisplay);
+
+	return lvds;
+}
+
+struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t lcd_info;
+	uint32_t panel_setup;
+	char stmp[30];
+	int tmp, i;
+	struct radeon_encoder_lvds *lvds = NULL;
+
+	lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+	if (lcd_info) {
+		lvds = malloc(sizeof(struct radeon_encoder_lvds),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+		if (!lvds)
+			return NULL;
+
+		for (i = 0; i < 24; i++)
+			stmp[i] = RBIOS8(lcd_info + i + 1);
+		stmp[24] = 0;
+
+		DRM_INFO("Panel ID String: %s\n", stmp);
+
+		lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+		lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
+
+		DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+			 lvds->native_mode.vdisplay);
+
+		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+		lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
+
+		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+		lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+		lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+		lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+		lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+
+		panel_setup = RBIOS32(lcd_info + 0x39);
+		lvds->lvds_gen_cntl = 0xff00;
+		if (panel_setup & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+		if ((panel_setup >> 4) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+		switch ((panel_setup >> 8) & 0x7) {
+		case 0:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+			break;
+		case 1:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+			break;
+		case 2:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+			break;
+		default:
+			break;
+		}
+
+		if ((panel_setup >> 16) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+		if ((panel_setup >> 17) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+		if ((panel_setup >> 18) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+		if ((panel_setup >> 23) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+		lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
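+		/* scan up to 32 mode records (16-bit pointers starting at
+		 * lcd_info + 64, zero-terminated) for one matching the native
+		 * panel size; horizontal fields are in 8-pixel units */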
+		for (i = 0; i < 32; i++) {
+			tmp = RBIOS16(lcd_info + 64 + i * 2);
+			if (tmp == 0)
+				break;
+
+			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+					(RBIOS8(tmp + 23) * 8);
+
+				lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+					(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+					((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+					((RBIOS16(tmp + 28) & 0xf800) >> 11);
+
+				lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
+				lvds->native_mode.flags = 0;
+				/* set crtc values */
+				drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+			}
+		}
+	} else {
+		DRM_INFO("No panel info found in BIOS\n");
+		lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
+	}
+
+	if (lvds)
+		encoder->native_mode = lvds->native_mode;
+	return lvds;
+}
+
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R100  */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV100 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS100 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV200 */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RS200 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R200  */
+	{{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}},	/* CHIP_RV250 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS300 */
+	{{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}},	/* CHIP_RV280 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R300  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R350  */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV350 */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV380 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
+};
+
+bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+					    struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		tmds->tmds_pll[i].value =
+			default_tmds_pll[rdev->family][i].value;
+		tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+					      struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t tmds_info;
+	int i, n;
+	uint8_t ver;
+
+	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+	if (tmds_info) {
+		ver = RBIOS8(tmds_info);
+		DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
+		if (ver == 3) {
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + i * 10 + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + i * 10 + 0x10);
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		} else if (ver == 4) {
+			int stride = 0;
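+			/* rev 4 packs records unevenly: the first entry
+			 * occupies 10 bytes, later ones 6, hence the stride */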
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + stride + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + stride + 0x10);
+				if (i == 0)
+					stride += 10;
+				else
+					stride += 6;
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		}
+	} else {
+		DRM_INFO("No TMDS info found in BIOS\n");
+		return false;
+	}
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* default for macs */
+	i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+	tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+
+	/* XXX some macs have dual-link chips */
+	switch (rdev->mode_info.connector_table) {
+	case CT_POWERBOOK_EXTERNAL:
+	case CT_MINI_EXTERNAL:
+	default:
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+		break;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+						  struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t offset;
+	uint8_t ver;
+	enum radeon_combios_ddc gpio;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	tmds->i2c_bus = NULL;
+	if (rdev->flags & RADEON_IS_IGP) {
+		i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
+			tmds->slave_addr = RBIOS8(offset + 4 + 2);
+			tmds->slave_addr >>= 1; /* 7 bit addressing */
+			gpio = RBIOS8(offset + 4 + 3);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		}
+	}
+
+	if (!tmds->i2c_bus) {
+		DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+		return false;
+	}
+
+	return true;
+}
+
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
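+	/* radeon_connector_table is the user override (a driver tunable);
+	 * CT_NONE means nothing was forced, so probe the platform below */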
+	rdev->mode_info.connector_table = radeon_connector_table;
+	if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+		if (of_machine_is_compatible("PowerBook3,3")) {
+			/* powerbook with VGA */
+			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
+			/* powerbook with internal tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
+			/* powerbook with external single link tmds (sil164) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
+			/* powerbook with external dual or single link tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
+			/* PowerBook6,2 ? */
+			/* powerbook with external dual link tmds (sil1178?) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
+			/* ibook */
+			rdev->mode_info.connector_table = CT_IBOOK;
+		} else if (of_machine_is_compatible("PowerMac3,5")) {
+			/* PowerMac G4 Silver radeon 7500 */
+			rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
+			/* emac */
+			rdev->mode_info.connector_table = CT_EMAC;
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
+			/* mini with internal tmds */
+			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
+			/* mini with external tmds */
+			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
+			/* PowerMac8,1 ? */
+			/* imac g5 isight */
+			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+		} else if ((dev->pci_device == 0x4a48) &&
+			   (dev->pci_subvendor == 0x1002) &&
+			   (dev->pci_subdevice == 0x4a48)) {
+			/* Mac X800 */
+			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((of_machine_is_compatible("PowerMac7,2") ||
+			    of_machine_is_compatible("PowerMac7,3")) &&
+			   (dev->pci_device == 0x4150) &&
+			   (dev->pci_subvendor == 0x1002) &&
+			   (dev->pci_subdevice == 0x4150)) {
+			/* Mac G5 tower 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
+		} else if ((dev->pci_device == 0x4c66) &&
+			   (dev->pci_subvendor == 0x1002) &&
+			   (dev->pci_subdevice == 0x4c66)) {
+			/* SAM440ep RV250 embedded board */
+			rdev->mode_info.connector_table = CT_SAM440EP;
+		} else
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+		if (ASIC_IS_RN50(rdev))
+			rdev->mode_info.connector_table = CT_RN50_POWER;
+		else
+#endif
+			rdev->mode_info.connector_table = CT_GENERIC;
+	}
+
+	switch (rdev->mode_info.connector_table) {
+	case CT_GENERIC:
+		DRM_INFO("Connector Table: %d (generic)\n",
+			 rdev->mode_info.connector_table);
+		/* these are the most common settings */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* LVDS */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else {
+			/* DVI-I - tv dac, int tmds */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT2_SUPPORT,
+									2),
+						  ATOM_DEVICE_CRT2_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_DFP1_SUPPORT |
+						    ATOM_DEVICE_CRT2_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		}
+
+		if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+			/* TV - tv dac */
+			ddc_i2c.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_TV1_SUPPORT,
+									2),
+						  ATOM_DEVICE_TV1_SUPPORT);
+			radeon_add_legacy_connector(dev, 2,
+						    ATOM_DEVICE_TV1_SUPPORT,
+						    DRM_MODE_CONNECTOR_SVIDEO,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SVIDEO,
+						    &hpd);
+		}
+		break;
+	case CT_IBOOK:
+		DRM_INFO("Connector Table: %d (ibook)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - TV DAC */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_EXTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		/* XXX some are single-link */
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_INTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_VGA:
+		DRM_INFO("Connector Table: %d (powerbook vga)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_EXTERNAL:
+		DRM_INFO("Connector Table: %d (mini external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		/* XXX are any dual-link? */
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_INTERNAL:
+		DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_IMAC_G5_ISIGHT:
+		DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-D - int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_EMAC:
+		DRM_INFO("Connector Table: %d (emac)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_RN50_POWER:
+		DRM_INFO("Connector Table: %d (rn50-power)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		break;
+	case CT_MAC_X800:
+		DRM_INFO("Connector Table: %d (mac x800)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_SAM440EP:
+		DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - secondary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2,
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MAC_G4_SILVER:
+		DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	default:
+		DRM_INFO("Connector table: %d (invalid)\n",
+			 rdev->mode_info.connector_table);
+		return false;
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+				       int bios_index,
+				       enum radeon_combios_connector
+				       *legacy_connector,
+				       struct radeon_i2c_bus_rec *ddc_i2c,
+				       struct radeon_hpd *hpd)
+{
+
+	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
+	   one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
+	if (dev->pci_device == 0x515e &&
+	    dev->pci_subvendor == 0x1014) {
+		if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+		    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+			return false;
+	}
+
+	/* X300 card with extra non-existent DVI port */
+	if (dev->pci_device == 0x5B60 &&
+	    dev->pci_subvendor == 0x17af &&
+	    dev->pci_subdevice == 0x201e && bios_index == 2) {
+		if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+			return false;
+	}
+
+	return true;
+}
+
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+	/* Acer 5102 has non-existent TV port */
+	if (dev->pci_device == 0x5975 &&
+	    dev->pci_subvendor == 0x1025 &&
+	    dev->pci_subdevice == 0x009f)
+		return false;
+
+	/* HP dc5750 has non-existent TV port */
+	if (dev->pci_device == 0x5974 &&
+	    dev->pci_subvendor == 0x103c &&
+	    dev->pci_subdevice == 0x280a)
+		return false;
+
+	/* MSI S270 has non-existent TV port */
+	if (dev->pci_device == 0x5955 &&
+	    dev->pci_subvendor == 0x1462 &&
+	    dev->pci_subdevice == 0x0131)
+		return false;
+
+	return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ext_tmds_info;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (is_dvi_d)
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+		else
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+	}
+	ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+	if (ext_tmds_info) {
+		uint8_t rev = RBIOS8(ext_tmds_info);
+		uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
+		if (rev >= 3) {
+			if (is_dvi_d)
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			else
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+		} else {
+			if (flags & 1) {
+				if (is_dvi_d)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+			}
+		}
+	}
+	if (is_dvi_d)
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+	else
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
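+
+/*
+ * For reference, the fields consulted above (offsets inferred from the code,
+ * not from a published layout): the external-TMDS info table begins with a
+ * revision byte, and the byte at offset 9 (4 + 5) carries flags whose bit 0
+ * marks a dual-link transmitter on pre-rev-3 tables; rev 3+ tables are
+ * treated as dual-link unconditionally.  IGPs always report single link.
+ */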
+
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t conn_info, entry, devices;
+	uint16_t tmp, connector_object_id;
+	enum radeon_combios_ddc ddc_type;
+	enum radeon_combios_connector connector;
+	int i = 0;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
+	conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+	if (conn_info) {
+		for (i = 0; i < 4; i++) {
+			entry = conn_info + 2 + i * 2;
+
+			if (!RBIOS16(entry))
+				break;
+
+			tmp = RBIOS16(entry);
+
+			connector = (tmp >> 12) & 0xf;
+
+			ddc_type = (tmp >> 8) & 0xf;
+			if (ddc_type == 5)
+				ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+			else
+				ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+			case CONNECTOR_DVI_I_LEGACY:
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					hpd.hpd = RADEON_HPD_2;
+				else
+					hpd.hpd = RADEON_HPD_1;
+				break;
+			default:
+				hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+
+			if (!radeon_apply_legacy_quirks(dev, i, &connector,
+							&ddc_i2c, &hpd))
+				continue;
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+				else
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+							    &hpd);
+				break;
+			case CONNECTOR_CRT_LEGACY:
+				if (tmp & 0x1) {
+					devices = ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices = ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_I_LEGACY:
+				devices = 0;
+				if (tmp & 0x1) {
+					devices |= ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices |= ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				/* RV100 board with external TMDS bit mis-set.
+				 * Actually uses internal TMDS, clear the bit.
+				 */
+				if (dev->pci_device == 0x5159 &&
+				    dev->pci_subvendor == 0x1014 &&
+				    dev->pci_subdevice == 0x029A) {
+					tmp &= ~(1 << 4);
+				}
+				if ((tmp >> 4) & 0x1) {
+					devices |= ATOM_DEVICE_DFP2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP2_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP2_SUPPORT);
+					connector_object_id = combios_check_dl_dvi(dev, 0);
+				} else {
+					devices |= ATOM_DEVICE_DFP1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP1_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP1_SUPPORT);
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1) {
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+					connector_object_id = combios_check_dl_dvi(dev, 1);
+				} else {
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_CTV_LEGACY:
+			case CONNECTOR_STV_LEGACY:
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev,
+							   ATOM_DEVICE_TV1_SUPPORT,
+							   2),
+							  ATOM_DEVICE_TV1_SUPPORT);
+				radeon_add_legacy_connector(dev, i,
+							    ATOM_DEVICE_TV1_SUPPORT,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SVIDEO,
+							    &hpd);
+				break;
+			default:
+				DRM_ERROR("Unknown connector type: %d\n",
+					  connector);
+				continue;
+			}
+
+		}
+	} else {
+		uint16_t tmds_info =
+		    combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+		if (tmds_info) {
+			DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_connector(dev,
+						    0,
+						    ATOM_DEVICE_CRT1_SUPPORT |
+						    ATOM_DEVICE_DFP1_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+		} else {
+			uint16_t crt_info =
+				combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+			DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
+			if (crt_info) {
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum(dev,
+										ATOM_DEVICE_CRT1_SUPPORT,
+										1),
+							  ATOM_DEVICE_CRT1_SUPPORT);
+				ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+				hpd.hpd = RADEON_HPD_NONE;
+				radeon_add_legacy_connector(dev,
+							    0,
+							    ATOM_DEVICE_CRT1_SUPPORT,
+							    DRM_MODE_CONNECTOR_VGA,
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+			} else {
+				DRM_DEBUG_KMS("No connector info found\n");
+				return false;
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+		uint16_t lcd_info =
+		    combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+		if (lcd_info) {
+			uint16_t lcd_ddc_info =
+			    combios_get_table_offset(dev,
+						     COMBIOS_LCD_DDC_INFO_TABLE);
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+
+			if (lcd_ddc_info) {
+				ddc_type = RBIOS8(lcd_ddc_info + 2);
+				switch (ddc_type) {
+				case DDC_LCD:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_LCD,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				case DDC_GPIO:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_GPIO,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				default:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+					break;
+				}
+				DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
+			} else
+				ddc_i2c.valid = false;
+
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_connector(dev,
+						    5,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+		}
+	}
+
+	/* check TV table */
+	if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+		uint32_t tv_info =
+		    combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+		if (tv_info) {
+			if (RBIOS8(tv_info + 6) == 'T') {
+				if (radeon_apply_legacy_tv_quirks(dev)) {
+					hpd.hpd = RADEON_HPD_NONE;
+					ddc_i2c.valid = false;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_TV1_SUPPORT,
+								   2),
+								  ATOM_DEVICE_TV1_SUPPORT);
+					radeon_add_legacy_connector(dev, 6,
+								    ATOM_DEVICE_TV1_SUPPORT,
+								    DRM_MODE_CONNECTOR_SVIDEO,
+								    &ddc_i2c,
+								    CONNECTOR_OBJECT_ID_SVIDEO,
+								    &hpd);
+				}
+			}
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
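+
+/*
+ * Decoding key for the 16-bit legacy connector-info entries above (inferred
+ * from the shifts used, not from a spec): bits 15:12 hold the legacy
+ * connector type, bits 11:8 the DDC line (5 means "look it up in the i2c
+ * table"), bit 4 picks the secondary digital path (DFP2 and HPD pin 2
+ * instead of DFP1/HPD1), and bit 0 picks the TV DAC (CRT2) over the primary
+ * DAC (CRT1) for analog outputs.
+ */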
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+};
+
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 offset, misc, misc2 = 0;
+	u8 rev, blocks, tmp;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	rdev->pm.default_power_state_index = -1;
+
+	/* allocate 2 power states */
+	rdev->pm.power_state = malloc(sizeof(struct radeon_power_state) * 2,
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (rdev->pm.power_state) {
+		/* allocate 1 clock mode per state */
+		rdev->pm.power_state[0].clock_info =
+			malloc(sizeof(struct radeon_pm_clock_info) * 1,
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		rdev->pm.power_state[1].clock_info =
+			malloc(sizeof(struct radeon_pm_clock_info) * 1,
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (!rdev->pm.power_state[0].clock_info ||
+		    !rdev->pm.power_state[1].clock_info)
+			goto pm_failed;
+	} else
+		goto pm_failed;
+
+	/* check for a thermal chip */
+	offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+	if (offset) {
+		u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+
+		rev = RBIOS8(offset);
+
+		if (rev == 0) {
+			thermal_controller = RBIOS8(offset + 3);
+			gpio = RBIOS8(offset + 4) & 0x3f;
+			i2c_addr = RBIOS8(offset + 5);
+		} else if (rev == 1) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+		} else if (rev == 2) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+			clk_bit = RBIOS8(offset + 0xa);
+			data_bit = RBIOS8(offset + 0xb);
+		}
+		if ((thermal_controller > 0) && (thermal_controller < 3)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+				 thermal_controller_names[thermal_controller],
+				 i2c_addr >> 1);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else if (gpio == DDC_GPIO)
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
+			else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+#ifdef FREEBSD_WIP
+				struct i2c_board_info info = { };
+				const char *name = thermal_controller_names[thermal_controller];
+				info.addr = i2c_addr >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+#endif /* FREEBSD_WIP */
+			}
+		}
+	} else {
+		/* boards with a thermal chip, but no overdrive table */
+
+		/* Asus 9600xt has an f75375 on the monid bus */
+		if ((dev->pci_device == 0x4152) &&
+		    (dev->pci_subvendor == 0x1043) &&
+		    (dev->pci_subdevice == 0xc002)) {
+			i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+#ifdef FREEBSD_WIP
+				struct i2c_board_info info = { };
+				const char *name = "f75375";
+				info.addr = 0x28;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+					 name, info.addr);
+#endif /* FREEBSD_WIP */
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY) {
+		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			blocks = RBIOS8(offset + 0x2);
+			/* power mode 0 tends to be the only valid one */
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+			rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				goto default_mode;
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_BATTERY;
+			misc = RBIOS16(offset + 0x5 + 0x0);
+			if (rev > 4)
+				misc2 = RBIOS16(offset + 0x5 + 0xe);
+			rdev->pm.power_state[state_index].misc = misc;
+			rdev->pm.power_state[state_index].misc2 = misc2;
+			if (misc & 0x4) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+				if (misc & 0x8)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+				if (rev < 6) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+						RBIOS16(offset + 0x5 + 0xb) * 4;
+					tmp = RBIOS8(offset + 0x5 + 0xd);
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+				} else {
+					u8 entries = RBIOS8(offset + 0x5 + 0xb);
+					u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+					if (entries && voltage_table_offset) {
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+							RBIOS16(voltage_table_offset) * 4;
+						tmp = RBIOS8(voltage_table_offset + 0x2);
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+					} else
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+				}
+				switch ((misc2 & 0x700) >> 8) {
+				case 0:
+				default:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+					break;
+				case 1:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+					break;
+				case 2:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+					break;
+				case 3:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+					break;
+				case 4:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+					break;
+				}
+			} else
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+			if (rev > 6)
+				rdev->pm.power_state[state_index].pcie_lanes =
+					RBIOS8(offset + 0x5 + 0x10);
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			state_index++;
+		} else {
+			/* XXX figure out some good default low power mode for mobility cards w/out power tables */
+		}
+	} else {
+		/* XXX figure out some good default low power mode for desktop cards */
+	}
+
+default_mode:
+	/* add the default mode */
+	rdev->pm.power_state[state_index].type =
+		POWER_STATE_TYPE_DEFAULT;
+	rdev->pm.power_state[state_index].num_clock_modes = 1;
+	rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+	if ((state_index > 0) &&
+	    (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+		rdev->pm.power_state[state_index].clock_info[0].voltage =
+			rdev->pm.power_state[0].clock_info[0].voltage;
+	else
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.power_state[state_index].flags = 0;
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = state_index + 1;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	return;
+
+pm_failed:
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = 0;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+}
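+
+/*
+ * Resulting state layout, for reference: at most two states exist.  On
+ * mobility parts with a POWERPLAY table, index 0 holds the BIOS battery
+ * mode (with optional GPIO-controlled voltage and a PCIe lane count on
+ * rev 7+ tables); the last index is always the default mode at the default
+ * mclk/sclk, inheriting state 0's GPIO voltage setting when one was parsed.
+ */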
+
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return;
+
+	switch (tmds->dvo_chip) {
+	case DVO_SIL164:
+		/* sil 164 */
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x08, 0x30);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x09, 0x00);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x0a, 0x90);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x0c, 0x89);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x08, 0x3b);
+		break;
+	case DVO_SIL1178:
+		/* sil 1178 - untested */
+		/*
+		 * 0x0f, 0x44
+		 * 0x0f, 0x4c
+		 * 0x0e, 0x01
+		 * 0x0a, 0x80
+		 * 0x09, 0x30
+		 * 0x0c, 0xc9
+		 * 0x0d, 0x70
+		 * 0x08, 0x32
+		 * 0x08, 0x33
+		 */
+		break;
+	default:
+		break;
+	}
+
+}
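+
+/*
+ * Note on the sequence above: register 0x08 appears to gate the SiL164
+ * (written as 0x30 before configuration and 0x3b to enable afterwards);
+ * the register meanings are taken on faith from the original recipe rather
+ * than a datasheet.  The SiL1178 list is kept only as raw register/value
+ * pairs because it was never tested.
+ */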
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint16_t offset;
+	uint8_t blocks, slave_addr, rev;
+	uint32_t index, id;
+	uint32_t reg, val, and_mask, or_mask;
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return false;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			if (rev > 1) {
+				blocks = RBIOS8(offset + 3);
+				index = offset + 4;
+				while (blocks > 0) {
+					id = RBIOS16(index);
+					index += 2;
+					switch (id >> 13) {
+					case 0:
+						reg = (id & 0x1fff) * 4;
+						val = RBIOS32(index);
+						index += 4;
+						WREG32(reg, val);
+						break;
+					case 2:
+						reg = (id & 0x1fff) * 4;
+						and_mask = RBIOS32(index);
+						index += 4;
+						or_mask = RBIOS32(index);
+						index += 4;
+						val = RREG32(reg);
+						val = (val & and_mask) | or_mask;
+						WREG32(reg, val);
+						break;
+					case 3:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val);
+						break;
+					case 4:
+						val = RBIOS16(index);
+						index += 2;
+						mdelay(val);
+						break;
+					case 6:
+						slave_addr = id & 0xff;
+						slave_addr >>= 1; /* 7 bit addressing */
+						index++;
+						reg = RBIOS8(index);
+						index++;
+						val = RBIOS8(index);
+						index++;
+						radeon_i2c_put_byte(tmds->i2c_bus,
+								    slave_addr,
+								    reg, val);
+						break;
+					default:
+						DRM_ERROR("Unknown id %d\n", id >> 13);
+						break;
+					}
+					blocks--;
+				}
+				return true;
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			index = offset + 10;
+			id = RBIOS16(index);
+			while (id != 0xffff) {
+				index += 2;
+				switch (id >> 13) {
+				case 0:
+					reg = (id & 0x1fff) * 4;
+					val = RBIOS32(index);
+					index += 4;
+					WREG32(reg, val);
+					break;
+				case 2:
+					reg = (id & 0x1fff) * 4;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32(reg, val);
+					break;
+				case 4:
+					val = RBIOS16(index);
+					index += 2;
+					udelay(val);
+					break;
+				case 5:
+					reg = id & 0x1fff;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32_PLL(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32_PLL(reg, val);
+					break;
+				case 6:
+					reg = id & 0x1fff;
+					val = RBIOS8(index);
+					index += 1;
+					radeon_i2c_put_byte(tmds->i2c_bus,
+							    tmds->slave_addr,
+							    reg, val);
+					break;
+				default:
+					DRM_ERROR("Unknown id %d\n", id >> 13);
+					break;
+				}
+				id = RBIOS16(index);
+			}
+			return true;
+		}
+	}
+	return false;
+}
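+
+/*
+ * Both script interpreters above dispatch on bits 15:13 of each 16-bit
+ * entry id, with bits 12:0 naming the target register (scaled by 4 for
+ * MMIO writes, raw for PLL and i2c ops).  The ops handled here: 0 = 32-bit
+ * MMIO write, 2 = MMIO read-modify-write with AND/OR masks, 3/4 = delays
+ * (the IGP table uses 3 for udelay and 4 for mdelay, the external table
+ * only 4, as udelay), 5 = PLL read-modify-write, 6 = single-byte i2c
+ * register write.  Unknown ops are logged and skipped.
+ */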
+
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
+		while (RBIOS16(offset)) {
+			uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+			uint32_t addr = (RBIOS16(offset) & 0x1fff);
+			uint32_t val, and_mask, or_mask;
+			uint32_t tmp;
+
+			offset += 2;
+			switch (cmd) {
+			case 0:
+			case 1:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32(addr, val);
+				break;
+			case 2:
+			case 3:
+				and_mask = RBIOS32(offset);
+				offset += 4;
+				or_mask = RBIOS32(offset);
+				offset += 4;
+				tmp = RREG32(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32(addr, tmp);
+				break;
+			case 4:
+				val = RBIOS16(offset);
+				offset += 2;
+				udelay(val);
+				break;
+			case 5:
+				val = RBIOS16(offset);
+				offset += 2;
+				switch (addr) {
+				case 8:
+					while (val--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 9:
+					while (val--) {
+						if ((RREG32(RADEON_MC_STATUS) &
+						     RADEON_MC_IDLE))
+							break;
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
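+
+/*
+ * Shape of the ASIC-init MMIO script parsed above: a 16-bit header per
+ * entry, command in bits 15:13 and register offset in bits 12:0, terminated
+ * by a zero header.  Commands 0/1 write a following 32-bit value, 2/3 apply
+ * AND/OR masks read from the table, 4 busy-waits the given number of
+ * microseconds, and 5 polls (pseudo-register 8 = spin until the MC leaves
+ * busy, 9 = spin until MC idle), bounded by the 16-bit count.
+ */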
+
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
+		while (RBIOS8(offset)) {
+			uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+			uint8_t addr = (RBIOS8(offset) & 0x3f);
+			uint32_t val, shift, tmp;
+			uint32_t and_mask, or_mask;
+
+			offset++;
+			switch (cmd) {
+			case 0:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32_PLL(addr, val);
+				break;
+			case 1:
+				shift = RBIOS8(offset) * 8;
+				offset++;
+				and_mask = RBIOS8(offset) << shift;
+				and_mask |= ~(0xff << shift);
+				offset++;
+				or_mask = RBIOS8(offset) << shift;
+				offset++;
+				tmp = RREG32_PLL(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32_PLL(addr, tmp);
+				break;
+			case 2:
+			case 3:
+				tmp = 1000;
+				switch (addr) {
+				case 1:
+					udelay(150);
+					break;
+				case 2:
+					mdelay(1);
+					break;
+				case 3:
+					while (tmp--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 4:
+					while (tmp--) {
+						if (RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						    RADEON_DLL_READY)
+							break;
+					}
+					break;
+				case 5:
+					tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+					if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+						uint32_t mclk_cntl =
+						    RREG32_PLL(RADEON_MCLK_CNTL);
+						mclk_cntl &= 0xffff0000;
+						/*mclk_cntl |= 0x00001111;*//* ??? */
+						WREG32_PLL(RADEON_MCLK_CNTL,
+							   mclk_cntl);
+						mdelay(10);
+#endif
+						WREG32_PLL(RADEON_CLK_PWRMGT_CNTL,
+						    tmp & ~RADEON_CG_NO1_DEBUG_0);
+						mdelay(10);
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
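+
+/*
+ * The PLL script uses single-byte headers: command in bits 7:6, PLL
+ * register index in bits 5:0, terminated by a zero byte.  Command 0 writes
+ * a 32-bit value, 1 read-modify-writes one byte lane at the position given
+ * by a shift operand, and 2/3 reuse the index field as a sub-op: 1 = 150us
+ * delay, 2 = 1ms delay, 3 = wait for MC not busy, 4 = wait for DLL ready,
+ * 5 = clear CG_NO1_DEBUG_0 if it is set.
+ */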
+
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+					  uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	if (offset) {
+		uint8_t val = RBIOS8(offset);
+		while (val != 0xff) {
+			offset++;
+
+			if (val == 0x0f) {
+				uint32_t channel_complete_mask;
+
+				if (ASIC_IS_R300(rdev))
+					channel_complete_mask =
+					    R300_MEM_PWRUP_COMPLETE;
+				else
+					channel_complete_mask =
+					    RADEON_MEM_PWRUP_COMPLETE;
+				tmp = 20000;
+				while (tmp--) {
+					if ((RREG32(RADEON_MEM_STR_CNTL) &
+					     channel_complete_mask) ==
+					    channel_complete_mask)
+						break;
+				}
+			} else {
+				uint32_t or_mask = RBIOS16(offset);
+				offset += 2;
+
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_SDRAM_MODE_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+				or_mask = val << 24;
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_B3MEM_RESET_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+			}
+			val = RBIOS8(offset);
+		}
+	}
+}
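+
+/*
+ * RAM-reset script decoding, for reference: entries run until a 0xff
+ * terminator.  Opcode 0x0f polls MEM_STR_CNTL until every channel reports
+ * power-up complete (the mask widens on R300+); any other opcode OR's its
+ * 16-bit operand into the SDRAM mode register and then places the opcode
+ * byte itself into the B3 reset field (bits 31:24).
+ */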
+
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+				   int mem_addr_mapping)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t mem_cntl;
+	uint32_t mem_size;
+	uint32_t addr = 0;
+
+	mem_cntl = RREG32(RADEON_MEM_CNTL);
+	if (mem_cntl & RV100_HALF_MODE)
+		ram /= 2;
+	mem_size = ram;
+	mem_cntl &= ~(0xff << 8);
+	mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+	WREG32(RADEON_MEM_CNTL, mem_cntl);
+	RREG32(RADEON_MEM_CNTL);
+
+	/* an SDRAM reset may belong here; none is issued */
+
+	/* heuristic probe: write a marker to each megabyte and read it back */
+	while (ram--) {
+		addr = ram * 1024 * 1024;
+		/* write to each page */
+		WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
+		/* read back and verify */
+		if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
+			return 0;
+	}
+
+	return mem_size;
+}
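+
+/*
+ * The size probe above is deliberately crude: after applying half-mode and
+ * the candidate address mapping, it writes 0xdeadbeef to the first dword of
+ * each megabyte through the indexed MM aperture and returns 0 on the first
+ * readback mismatch, accepting the full candidate size otherwise.
+ */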
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t rev;
+	uint16_t offset;
+	uint32_t mem_size = 0;
+	uint32_t mem_cntl = 0;
+
+	/* should do something smarter here I guess... */
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	/* first check detected mem table */
+	offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+	if (offset) {
+		rev = RBIOS8(offset);
+		if (rev < 3) {
+			mem_cntl = RBIOS32(offset + 1);
+			mem_size = RBIOS16(offset + 5);
+			if ((rdev->family < CHIP_R200) &&
+			    !ASIC_IS_RN50(rdev))
+				WREG32(RADEON_MEM_CNTL, mem_cntl);
+		}
+	}
+
+	if (!mem_size) {
+		offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset - 1);
+			if (rev < 1) {
+				if ((rdev->family < CHIP_R200)
+				    && !ASIC_IS_RN50(rdev)) {
+					int ram = 0;
+					int mem_addr_mapping = 0;
+
+					while (RBIOS8(offset)) {
+						ram = RBIOS8(offset);
+						mem_addr_mapping =
+						    RBIOS8(offset + 1);
+						if (mem_addr_mapping != 0x25)
+							ram *= 2;
+						mem_size =
+						    combios_detect_ram(dev, ram,
+								       mem_addr_mapping);
+						if (mem_size)
+							break;
+						offset += 2;
+					}
+				} else
+					mem_size = RBIOS8(offset);
+			} else {
+				mem_size = RBIOS8(offset);
+				mem_size *= 2;	/* convert to MB */
+			}
+		}
+	}
+
+	mem_size *= (1024 * 1024);	/* convert to bytes */
+	WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
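+
+/*
+ * Sizing strategy, in order: trust the detected-memory table when its
+ * revision is known (also restoring MEM_CNTL on pre-R200, non-RN50 parts),
+ * fall back to walking the memory-config table and probing each
+ * size/mapping pair with combios_detect_ram, then publish the result in
+ * CONFIG_MEMSIZE in bytes.
+ */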
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t table;
+
+	/* port hardcoded mac stuff from radeonfb */
+	if (rdev->bios == NULL)
+		return;
+
+	/* ASIC INIT 1 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	/* PLL INIT */
+	table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+
+	/* ASIC INIT 2 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		/* ASIC INIT 4 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* RAM RESET */
+		table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+		if (table)
+			combios_parse_ram_reset_table(dev, table);
+
+		/* ASIC INIT 3 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* write CONFIG_MEMSIZE */
+		combios_write_ram_size(dev);
+	}
+
+	/* quirk for rs4xx HP nx6125 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    dev->pci_subvendor == 0x103c &&
+	    dev->pci_subdevice == 0x308b)
+		return;
+
+	/* quirk for rs4xx HP dv5000 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    dev->pci_subvendor == 0x103c &&
+	    dev->pci_subdevice == 0x30a4)
+		return;
+
+	/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    dev->pci_subvendor == 0x103c &&
+	    dev->pci_subdevice == 0x30ae)
+		return;
+
+	/* DYN CLK 1 */
+	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+
+}
+
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+	bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+	/* let the bios control the backlight */
+	bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+			   RADEON_ACC_MODE_CHANGE);
+
+	/* tell the bios a driver is loaded */
+	bios_7_scratch |= RADEON_DRV_LOADED;
+
+	WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock)
+		bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+	else
+		bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			/* fix me */
+			bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+			/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+			bios_5_scratch |= RADEON_TV1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_TV1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_4_scratch |= RADEON_LCD1_ATTACHED;
+			bios_5_scratch |= RADEON_LCD1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+			bios_5_scratch &= ~RADEON_LCD1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_4_scratch |= RADEON_DFP1_ATTACHED;
+			bios_5_scratch |= RADEON_DFP1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_4_scratch |= RADEON_DFP2_ATTACHED;
+			bios_5_scratch |= RADEON_DFP2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+		}
+	}
+	WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+	}
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_TV_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_CRT_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_LCD_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_DFP_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+	}
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_combios.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_connectors.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_connectors.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_connectors.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2052 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_connectors.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected);
+#endif
+
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+		return;
+
+	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	/* if the connector is already off, don't turn it back on */
+	if (connector->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	/* just deal with DP (not eDP) here. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		/* if existing sink type was not DP no need to retrain */
+		if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return;
+
+		/* first get sink type as it may be reset after (un)plug */
+		dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		/* don't do anything if sink is not display port, i.e.,
+		 * passive dp->(dvi|hdmi) adaptor
+		 */
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			int saved_dpms = connector->dpms;
+			/* Only turn off the display if it's physically disconnected */
+			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* set it to OFF so that drm_helper_connector_dpms()
+				 * won't return immediately since the current state
+				 * is ON at this point.
+				 */
+				connector->dpms = DRM_MODE_DPMS_OFF;
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+			}
+			connector->dpms = saved_dpms;
+		}
+	}
+}
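+
+/*
+ * Hotplug policy in short: re-read the DP sink type, since it can change
+ * across a plug event; if the sink is still native DP, power the connector
+ * off when the HPD line says it is physically gone, otherwise force a DPMS
+ * off->on cycle so drm_helper_connector_dpms() actually retrains the link.
+ */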
+
+static void radeon_property_change_mode(struct drm_encoder *encoder)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+
+	if (crtc && crtc->enabled) {
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y, crtc->fb);
+	}
+}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int bpc = 8;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (connector->display_info.bpc)
+					bpc = connector->display_info.bpc;
+			}
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+		    drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (connector->display_info.bpc)
+			bpc = connector->display_info.bpc;
+		else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			struct drm_connector_helper_funcs *connector_funcs =
+				connector->helper_private;
+			struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+			if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+				bpc = 6;
+			else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+				bpc = 8;
+		}
+		break;
+	}
+	return bpc;
+}
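+
+/*
+ * Summary of the depth selection above: 8 bpc is the default everywhere;
+ * HDMI-capable sinks and DP/eDP take the EDID-reported depth when one
+ * exists, and DCE4.1/DCE5 panels without one fall back to the 6/8-bit
+ * flags in the panel's lcd_misc word.
+ */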
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *best_encoder = NULL;
+	struct drm_encoder *encoder = NULL;
+	struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+	struct drm_mode_object *obj;
+	bool connected;
+	int i;
+
+	best_encoder = connector_funcs->best_encoder(connector);
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev,
+					   connector->encoder_ids[i],
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+
+		if ((encoder == best_encoder) && (status == connector_status_connected))
+			connected = true;
+		else
+			connected = false;
+
+		if (rdev->is_atom_bios)
+			radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+		else
+			radeon_combios_connected_scratch_regs(connector, encoder, connected);
+
+	}
+}
+
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		if (encoder->encoder_type == encoder_type)
+			return encoder;
+	}
+	return NULL;
+}
+
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+/*
+ * radeon_connector_analog_encoder_conflict_solve
+ * - search for other connectors sharing this encoder
+ *   if priority is true, then set them disconnected if this is connected
+ *   if priority is false, set us disconnected if they are connected
+ */
+static enum drm_connector_status
+radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+					       struct drm_encoder *encoder,
+					       enum drm_connector_status current_status,
+					       bool priority)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_connector *conflict;
+	struct radeon_connector *radeon_conflict;
+	int i;
+
+	list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+		if (conflict == connector)
+			continue;
+
+		radeon_conflict = to_radeon_connector(conflict);
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (conflict->encoder_ids[i] == 0)
+				break;
+
+			/* if the IDs match */
+			if (conflict->encoder_ids[i] == encoder->base.id) {
+				if (conflict->status != connector_status_connected)
+					continue;
+
+				if (radeon_conflict->use_digital)
+					continue;
+
+				if (priority == true) {
+					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
+					conflict->status = connector_status_disconnected;
+					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
+				} else {
+					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
+					current_status = connector_status_disconnected;
+				}
+				break;
+			}
+		}
+	}
+	return current_status;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+	if (native_mode->hdisplay != 0 &&
+	    native_mode->vdisplay != 0 &&
+	    native_mode->clock != 0) {
+		mode = drm_mode_duplicate(dev, native_mode);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		drm_mode_set_name(mode);
+
+		DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
+	} else if (native_mode->hdisplay != 0 &&
+		   native_mode->vdisplay != 0) {
+		/* Mac laptops without an EDID */
+		/* Note that this is not necessarily the exact panel mode,
+		 * but an approximation based on the cvt formula.  For these
+		 * systems we should ideally read the mode info out of the
+		 * registers or add a mode table, but this works and is much
+		 * simpler.
+		 */
+		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
+	}
+	return mode;
+}
+
+static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[17] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
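+	/* filter the table: TV is capped at 1024x768; panels skip modes larger
+	 * than the native mode in either dimension (or exactly equal to it);
+	 * anything below 320x200 is dropped
+	 */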
+	for (i = 0; i < 17; i++) {
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+			if (common_modes[i].w > 1024 ||
+			    common_modes[i].h > 768)
+				continue;
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (common_modes[i].w > native_mode->hdisplay ||
+			    common_modes[i].h > native_mode->vdisplay ||
+			    (common_modes[i].w == native_mode->hdisplay &&
+			     common_modes[i].h == native_mode->vdisplay))
+				continue;
+		}
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		drm_mode_probed_add(connector, mode);
+	}
+}
+
+static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
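+	/* most properties below locate the affected encoder, update its private
+	 * state and force a modeset via radeon_property_change_mode()
+	 */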
+	if (property == rdev->mode_info.coherent_mode_property) {
+		struct radeon_encoder_atom_dig *dig;
+		bool new_coherent_mode;
+
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (!radeon_encoder->enc_priv)
+			return 0;
+
+		dig = radeon_encoder->enc_priv;
+		new_coherent_mode = val ? true : false;
+		if (dig->coherent_mode != new_coherent_mode) {
+			dig->coherent_mode = new_coherent_mode;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_type != val) {
+			radeon_encoder->underscan_type = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_hborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_hborder != val) {
+			radeon_encoder->underscan_hborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_vborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_vborder != val) {
+			radeon_encoder->underscan_vborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.tv_std_property) {
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
+		if (!encoder) {
+			encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC);
+		}
+
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (!radeon_encoder->enc_priv)
+			return 0;
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+			struct radeon_encoder_atom_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		} else {
+			struct radeon_encoder_tv_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	if (property == rdev->mode_info.load_detect_property) {
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+
+		if (val == 0)
+			radeon_connector->dac_load_detect = false;
+		else
+			radeon_connector->dac_load_detect = true;
+	}
+
+	if (property == rdev->mode_info.tmds_pll_property) {
+		struct radeon_encoder_int_tmds *tmds = NULL;
+		bool ret = false;
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		tmds = radeon_encoder->enc_priv;
+		if (!tmds)
+			return 0;
+
+		if (val == 0) {
+			if (rdev->is_atom_bios)
+				ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds);
+			else
+				ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
+		}
+		if (val == 1 || ret == false) {
+			radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	return 0;
+}
+
+static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
+					  struct drm_connector *connector)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	struct drm_display_mode *t, *mode;
+
+	/* If the EDID preferred mode doesn't match the native mode, use it */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay)
+				memcpy(native_mode, mode, sizeof(*mode));
+		}
+	}
+
+	/* Try to get native mode details from EDID if necessary */
+	if (!native_mode->clock) {
+		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+			if (mode->hdisplay == native_mode->hdisplay &&
+			    mode->vdisplay == native_mode->vdisplay) {
+				*native_mode = *mode;
+				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
+				break;
+			}
+		}
+	}
+
+	if (!native_mode->clock) {
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
+		radeon_encoder->rmx_type = RMX_OFF;
+	}
+}
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	int ret = 0;
+	struct drm_display_mode *mode;
+
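+	/* prefer EDID modes read over DDC; fall back to the vbios native mode below */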
+	if (radeon_connector->ddc_bus) {
+		ret = radeon_ddc_get_modes(radeon_connector);
+		if (ret > 0) {
+			encoder = radeon_best_single_encoder(connector);
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+	}
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* we have no EDID modes */
+	mode = radeon_fp_native_mode(encoder);
+	if (mode) {
+		ret = 1;
+		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	}
+
+	return ret;
+}
+
+static int radeon_lvds_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+	if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+		return MODE_PANEL;
+
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* AVIVO hardware supports downscaling modes larger than the panel
+		 * to the panel size, but I'm not sure this is desirable.
+		 */
+		if ((mode->hdisplay > native_mode->hdisplay) ||
+		    (mode->vdisplay > native_mode->vdisplay))
+			return MODE_PANEL;
+
+		/* if scaling is disabled, block non-native modes */
+		if (radeon_encoder->rmx_type == RMX_OFF) {
+			if ((mode->hdisplay != native_mode->hdisplay) ||
+			    (mode->vdisplay != native_mode->vdisplay))
+				return MODE_PANEL;
+		}
+	}
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+
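+	/* LVDS has no reliable hot-plug; a plausible native mode or a
+	 * readable EDID counts as connected
+	 */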
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* check if panel is valid */
+		if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+			ret = connector_status_connected;
+	}
+
+	/* check for edid as well */
+	if (radeon_connector->edid)
+		ret = connector_status_connected;
+	else {
+		if (radeon_connector->ddc_bus) {
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      radeon_connector->ddc_bus->adapter);
+			if (radeon_connector->edid)
+				ret = connector_status_connected;
+		}
+	}
+	/* XXX should we also check the ACPI lid status? */
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->edid)
+		free(radeon_connector->edid, DRM_MEM_KMS);
+	free(radeon_connector->con_priv, DRM_MEM_DRIVER);
+#ifdef FREEBSD_WIP
+	drm_sysfs_connector_remove(connector);
+#endif /* FREEBSD_WIP */
+	drm_connector_cleanup(connector);
+	free(connector, DRM_MEM_DRIVER);
+}
+
+static int radeon_lvds_set_property(struct drm_connector *connector,
+				    struct drm_property *property,
+				    uint64_t value)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_encoder *radeon_encoder;
+	enum radeon_rmx_type rmx_type;
+
+	DRM_DEBUG_KMS("\n");
+	if (property != dev->mode_config.scaling_mode_property)
+		return 0;
+
+	if (connector->encoder)
+		radeon_encoder = to_radeon_encoder(connector->encoder);
+	else {
+		struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+		radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+	}
+
+	switch (value) {
+	case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+	case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+	case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+	}
+	if (radeon_encoder->rmx_type == rmx_type)
+		return 0;
+
+	radeon_encoder->rmx_type = rmx_type;
+
+	radeon_property_change_mode(&radeon_encoder->base);
+	return 0;
+}
+
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+	.get_modes = radeon_lvds_get_modes,
+	.mode_valid = radeon_lvds_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_lvds_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+
+	return ret;
+}
+
+static int radeon_vga_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* XXX check mode bandwidth */
+
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	bool dret = false;
+	enum drm_connector_status ret = connector_status_disconnected;
+
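+	/* try a DDC probe first; destructive DAC load detection only runs when forced */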
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+
+	if (radeon_connector->ddc_bus)
+		dret = radeon_ddc_probe(radeon_connector, false);
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		if (radeon_connector->edid) {
+			free(radeon_connector->edid, DRM_MEM_KMS);
+			radeon_connector->edid = NULL;
+		}
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base, radeon_connector->ddc_bus->adapter);
+
+		if (!radeon_connector->edid) {
+			DRM_DEBUG_KMS("%s: probed a monitor but no|invalid EDID\n",
+					drm_get_connector_name(connector));
+			ret = connector_status_connected;
+		} else {
+			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+				free(radeon_connector->edid, DRM_MEM_KMS);
+				radeon_connector->edid = NULL;
+				ret = connector_status_disconnected;
+			} else
+				ret = connector_status_connected;
+		}
+	} else {
+		/* if we aren't forcing don't do destructive polling */
+		if (!force) {
+			/* only return the previous status if we last
+			 * detected a monitor via load.
+			 */
+			if (radeon_connector->detected_by_load)
+				return connector->status;
+			else
+				return ret;
+		}
+
+		if (radeon_connector->dac_load_detect && encoder) {
+			encoder_funcs = encoder->helper_private;
+			ret = encoder_funcs->detect(encoder, connector);
+			if (ret != connector_status_disconnected)
+				radeon_connector->detected_by_load = true;
+		}
+	}
+
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the CRT is connected and use that EDID.
+	 */
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		ret = connector_status_connected;
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_vga_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static int radeon_tv_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *tv_mode;
+	struct drm_encoder *encoder;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* avivo chips can scale any mode */
+	if (rdev->family >= CHIP_RS600)
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	else {
+		/* only 800x600 is supported right now on pre-avivo chips */
+		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
+		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, tv_mode);
+	}
+	return 1;
+}
+
+static int radeon_tv_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+		return MODE_CLOCK_RANGE;
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+
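+	/* TV-out has no DDC, so DAC load detection is the only probe available */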
+	if (!radeon_connector->dac_load_detect)
+		return ret;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+	else {
+		encoder_funcs = encoder->helper_private;
+		ret = encoder_funcs->detect(encoder, connector);
+	}
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+	.get_modes = radeon_tv_get_modes,
+	.mode_valid = radeon_tv_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_tv_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+	return ret;
+}
+
+static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status status;
+
+	/* We only trust HPD on R600 and newer ASICs. */
+	if (rdev->family >= CHIP_R600
+	  && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+		if (connector->status == status)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * DVI is complicated.
+ * Do a DDC probe; if the DDC probe passes, get the full EDID so
+ * we can do analog/digital monitor detection at this point.
+ * If the monitor is an analog monitor, or we got no DDC,
+ * we need to find the DAC encoder object for this connector.
+ * If we got no DDC, we do load detection on the DAC encoder object.
+ * If we got analog DDC or load detection passes on the DAC encoder,
+ * we have to check if this analog encoder is shared with anyone else (TV);
+ * if it's shared, we have to set the other connector to disconnected.
+ */
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = NULL;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_mode_object *obj;
+	int i;
+	enum drm_connector_status ret = connector_status_disconnected;
+	bool dret = false, broken_edid = false;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector))
+		return connector->status;
+
+	if (radeon_connector->ddc_bus)
+		dret = radeon_ddc_probe(radeon_connector, false);
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		if (radeon_connector->edid) {
+			free(radeon_connector->edid, DRM_MEM_KMS);
+			radeon_connector->edid = NULL;
+		}
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base, radeon_connector->ddc_bus->adapter);
+
+		if (!radeon_connector->edid) {
+			DRM_DEBUG_KMS("%s: probed a monitor but no|invalid EDID\n",
+					drm_get_connector_name(connector));
+			/* rs690 seems to have a problem with connectors not existing and always
+			 * returns a block of 0's. If we see this, just stop polling on this output */
+			if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+				ret = connector_status_disconnected;
+				DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+				radeon_connector->ddc_bus = NULL;
+			} else {
+				ret = connector_status_connected;
+				broken_edid = true; /* defer use_digital to later */
+			}
+		} else {
+			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+				free(radeon_connector->edid, DRM_MEM_KMS);
+				radeon_connector->edid = NULL;
+				ret = connector_status_disconnected;
+			} else
+				ret = connector_status_connected;
+
+			/* This gets complicated.  We have boards with VGA + HDMI with a
+			 * shared DDC line and we have boards with DVI-D + HDMI with a shared
+			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
+			 * you don't really know what's connected to which port as both are digital.
+			 */
+			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+				struct drm_connector *list_connector;
+				struct radeon_connector *list_radeon_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_radeon_connector = to_radeon_connector(list_connector);
+					if (list_radeon_connector->shared_ddc &&
+					    (list_radeon_connector->ddc_bus->rec.i2c_id ==
+					     radeon_connector->ddc_bus->rec.i2c_id)) {
+						/* cases where both connectors are digital */
+						if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+							/* hpd is our only option in this case */
+							if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+								free(radeon_connector->edid, DRM_MEM_KMS);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
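+	/* a digital sink confirmed via EDID needs no analog fallback */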
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
+		goto out;
+
+	/* DVI-D and HDMI-A are digital only */
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+		goto out;
+
+	/* if we aren't forcing don't do destructive polling */
+	if (!force) {
+		/* only return the previous status if we last
+		 * detected a monitor via load.
+		 */
+		if (radeon_connector->detected_by_load)
+			ret = connector->status;
+		goto out;
+	}
+
+	/* find analog encoder */
+	if (radeon_connector->dac_load_detect) {
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == 0)
+				break;
+
+			obj = drm_mode_object_find(connector->dev,
+						   connector->encoder_ids[i],
+						   DRM_MODE_OBJECT_ENCODER);
+			if (!obj)
+				continue;
+
+			encoder = obj_to_encoder(obj);
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+				continue;
+
+			encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->detect) {
+				if (!broken_edid) {
+					if (ret != connector_status_connected) {
+						/* deal with analog monitors without DDC */
+						ret = encoder_funcs->detect(encoder, connector);
+						if (ret == connector_status_connected) {
+							radeon_connector->use_digital = false;
+						}
+						if (ret != connector_status_disconnected)
+							radeon_connector->detected_by_load = true;
+					}
+				} else {
+					enum drm_connector_status lret;
+					/* assume digital unless load detected otherwise */
+					radeon_connector->use_digital = true;
+					lret = encoder_funcs->detect(encoder, connector);
+					DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+					if (lret == connector_status_connected)
+						radeon_connector->use_digital = false;
+				}
+				break;
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
+	    encoder) {
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+	}
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the DFP is connected and use that EDID.  In most
+	 * cases the DVI port is actually a virtual KVM port connected to the service
+	 * processor.
+	 */
+out:
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		radeon_connector->use_digital = true;
+		ret = connector_status_connected;
+	}
+
+	/* updated in get modes as well since we need to know if it's analog or digital */
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+/* okay, we need to be smart in here about which encoder to pick */
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int i;
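+	/* first pass: honor use_digital and pick a matching TMDS or DAC/TVDAC encoder */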
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+
+		if (radeon_connector->use_digital == true) {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+				return encoder;
+		} else {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+			    encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+				return encoder;
+		}
+	}
+
+	/* see if we have a default encoder  TODO */
+
+	/* then check use digital */
+	/* pick the first one */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static void radeon_dvi_force(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	if (connector->force == DRM_FORCE_ON)
+		radeon_connector->use_digital = false;
+	if (connector->force == DRM_FORCE_ON_DIGITAL)
+		radeon_connector->use_digital = true;
+}
+
+static int radeon_dvi_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	/* XXX check mode bandwidth */
+
+	/* clocks over 135 MHz have heat issues with DVI on RV100 */
+	if (radeon_connector->use_digital &&
+	    (rdev->family == CHIP_RV100) &&
+	    (mode->clock > 135000))
+		return MODE_CLOCK_HIGH;
+
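+	/* single-link TMDS tops out at 165 MHz; above that only dual-link
+	 * DVI, HDMI type B, or DCE6 HDMI can qualify
+	 */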
+	if (radeon_connector->use_digital && (mode->clock > 165000)) {
+		if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+			return MODE_OK;
+		else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+			if (ASIC_IS_DCE6(rdev)) {
+				/* HDMI 1.3+ supports max clock of 340 MHz */
+				if (mode->clock > 340000)
+					return MODE_CLOCK_HIGH;
+				else
+					return MODE_OK;
+			} else
+				return MODE_CLOCK_HIGH;
+		} else
+			return MODE_CLOCK_HIGH;
+	}
+
+	/* check against the max pixel clock */
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+	.get_modes = radeon_dvi_get_modes,
+	.mode_valid = radeon_dvi_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dvi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	if (radeon_connector->edid)
+		free(radeon_connector->edid, DRM_MEM_KMS);
+	if (radeon_dig_connector->dp_i2c_bus)
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+	free(radeon_connector->con_priv, DRM_MEM_DRIVER);
+#ifdef FREEBSD_WIP
+	drm_sysfs_connector_remove(connector);
+#endif /* FREEBSD_WIP */
+	drm_connector_cleanup(connector);
+	free(connector, DRM_MEM_DRIVER);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int ret;
+
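+	/* panel connectors (eDP/LVDS bridges) may need panel power cycling and a
+	 * native-mode fallback; everything else is a plain DDC probe
+	 */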
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_display_mode *mode;
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+			ret = radeon_ddc_get_modes(radeon_connector);
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+		} else {
+			/* need to setup ddc on the bridge */
+			if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			    ENCODER_OBJECT_ID_NONE) {
+				if (encoder)
+					radeon_atom_ext_encoder_setup_ddc(encoder);
+			}
+			ret = radeon_ddc_get_modes(radeon_connector);
+		}
+
+		if (ret > 0) {
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+
+		if (!encoder)
+			return 0;
+
+		/* we have no EDID modes */
+		mode = radeon_fp_native_mode(encoder);
+		if (mode) {
+			ret = 1;
+			drm_mode_probed_add(connector, mode);
+			/* add the width/height from vbios tables if available */
+			connector->display_info.width_mm = mode->width_mm;
+			connector->display_info.height_mm = mode->height_mm;
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
+		}
+	} else {
+		/* need to setup ddc on the bridge */
+		if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			ENCODER_OBJECT_ID_NONE) {
+			if (encoder)
+				radeon_atom_ext_encoder_setup_ddc(encoder);
+		}
+		ret = radeon_ddc_get_modes(radeon_connector);
+	}
+
+	return ret;
+}
+
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+
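+	/* TRAVIS (DP to LVDS) and NUTMEG (DP to VGA) are the known bridge encoders */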
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			break;
+		}
+	}
+
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+	bool found = false;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+			found = true;
+	}
+
+	return found;
+}
+
+bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
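+	/* DP 1.2 (HBR2) needs DCE5+, a fast enough external DP clock and an
+	 * HBR2-capable encoder
+	 */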
+	if (ASIC_IS_DCE5(rdev) &&
+	    (rdev->clock.dp_extclk >= 53900) &&
+	    radeon_connector_encoder_is_hbr2(connector)) {
+		return true;
+	}
+
+	return false;
+}
+
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+	if (!force && radeon_check_hpd_status_unchanged(connector))
+		return connector->status;
+
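+	/* drop any cached EDID; each detect pass re-reads it */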
+	if (radeon_connector->edid) {
+		free(radeon_connector->edid, DRM_MEM_KMS);
+		radeon_connector->edid = NULL;
+	}
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* check if panel is valid */
+			if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+				ret = connector_status_connected;
+		}
+		/* eDP is always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+		if (radeon_dp_getdpcd(radeon_connector))
+			ret = connector_status_connected;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+	} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+		   ENCODER_OBJECT_ID_NONE) {
+		/* DP bridges are always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		/* get the DPCD from the bridge */
+		radeon_dp_getdpcd(radeon_connector);
+
+		if (encoder) {
+			/* setup ddc on the bridge */
+			radeon_atom_ext_encoder_setup_ddc(encoder);
+			/* bridge chips are always aux */
+			if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+				ret = connector_status_connected;
+			else if (radeon_connector->dac_load_detect) { /* try load detection */
+				struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+				ret = encoder_funcs->detect(encoder, connector);
+			}
+		}
+	} else {
+		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+			ret = connector_status_connected;
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				radeon_dp_getdpcd(radeon_connector);
+		} else {
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				if (radeon_dp_getdpcd(radeon_connector))
+					ret = connector_status_connected;
+			} else {
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+				if (radeon_ddc_probe(radeon_connector, false))
+					ret = connector_status_connected;
+			}
+		}
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+			return MODE_PANEL;
+
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* AVIVO hardware supports downscaling modes larger than the panel
+			 * to the panel size, but I'm not sure this is desirable.
+			 */
+			if ((mode->hdisplay > native_mode->hdisplay) ||
+			    (mode->vdisplay > native_mode->vdisplay))
+				return MODE_PANEL;
+
+			/* if scaling is disabled, block non-native modes */
+			if (radeon_encoder->rmx_type == RMX_OFF) {
+				if ((mode->hdisplay != native_mode->hdisplay) ||
+				    (mode->vdisplay != native_mode->vdisplay))
+					return MODE_PANEL;
+			}
+		}
+		return MODE_OK;
+	} else {
+		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return radeon_dp_mode_valid_helper(connector, mode);
+		else
+			return MODE_OK;
+	}
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t subpixel_order = SubPixelNone;
+	bool shared_ddc = false;
+	bool is_dp_bridge = false;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0 don't try and add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+		if (radeon_connector->ddc_bus && i2c_bus->valid) {
+			if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+				radeon_connector->shared_ddc = true;
+				shared_ddc = true;
+			}
+			if (radeon_connector->router_bus && router->ddc_valid &&
+			    (radeon_connector->router.router_id == router->router_id)) {
+				radeon_connector->shared_ddc = false;
+				shared_ddc = false;
+			}
+		}
+	}
+
+	/* check if it's a dp bridge */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->devices & supported_device) {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_TRAVIS:
+			case ENCODER_OBJECT_ID_NUTMEG:
+				is_dp_bridge = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
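+	/* allocation failure is non-fatal: the connector is simply never registered */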
+	radeon_connector = malloc(sizeof(struct radeon_connector),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->shared_ddc = shared_ddc;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	radeon_connector->router = *router;
+	if (router->ddc_valid || router->cd_valid) {
+		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+		if (!radeon_connector->router_bus)
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+	}
+
+	if (is_dp_bridge) {
+		radeon_dig_connector = malloc(
+		    sizeof(struct radeon_connector_atom_dig),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (!radeon_dig_connector)
+			goto failed;
+		radeon_dig_connector->igp_lane_info = igp_lane_info;
+		radeon_connector->con_priv = radeon_dig_connector;
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			if (connector_type == DRM_MODE_CONNECTOR_eDP)
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+			else
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+		default:
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_property,
+						      UNDERSCAN_OFF);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_hborder_property,
+						      0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_vborder_property,
+						      0);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_eDP:
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	} else {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVIA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+			radeon_dig_connector = malloc(
+			    sizeof(struct radeon_connector_atom_dig),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			radeon_dig_connector = malloc(
+			    sizeof(struct radeon_connector_atom_dig),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			radeon_dig_connector = malloc(
+			    sizeof(struct radeon_connector_atom_dig),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			connector->interlace_allowed = true;
+			/* in theory with a DP to VGA converter... */
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_eDP:
+			radeon_dig_connector = malloc(
+			    sizeof(struct radeon_connector_atom_dig),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.tv_std_property,
+						      radeon_atombios_get_tv_info(rdev));
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			radeon_dig_connector = malloc(
+			    sizeof(struct radeon_connector_atom_dig),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->display_info.subpixel_order = subpixel_order;
+#ifdef FREEBSD_WIP
+	drm_sysfs_connector_add(connector);
+#endif /* FREEBSD_WIP */
+	return;
+
+failed:
+	drm_connector_cleanup(connector);
+	free(connector, DRM_MEM_DRIVER);
+}
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	uint32_t subpixel_order = SubPixelNone;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0 don't try and add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+	}
+
+	radeon_connector = malloc(sizeof(struct radeon_connector),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVIA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_DVII)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+		radeon_connector->dac_load_detect = true;
+		/* The RS400/RC410/RS480 chipsets seem to report a lot
+		 * of false positives on load detect; we haven't yet
+		 * found a way to make load detect reliable on those
+		 * chipsets, so just disable it for TV.
+		 */
+		if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+			radeon_connector->dac_load_detect = false;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      radeon_connector->dac_load_detect);
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.tv_std_property,
+					      radeon_combios_get_tv_info(rdev));
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		drm_object_attach_property(&radeon_connector->base.base,
+					      dev->mode_config.scaling_mode_property,
+					      DRM_MODE_SCALE_FULLSCREEN);
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->display_info.subpixel_order = subpixel_order;
+#ifdef FREEBSD_WIP
+	drm_sysfs_connector_add(connector);
+#endif /* FREEBSD_WIP */
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_connectors.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_cp.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_cp.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_cp.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2251 @@
+/* $MidnightBSD$ */
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2007 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin at valinux.com>
+ *    Gareth Hughes <gareth at valinux.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_cp.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define RADEON_FIFO_DEBUG	0
+
+/* Firmware Names */
+#define FIRMWARE_R100		"radeonkmsfw_R100_cp"
+#define FIRMWARE_R200		"radeonkmsfw_R200_cp"
+#define FIRMWARE_R300		"radeonkmsfw_R300_cp"
+#define FIRMWARE_R420		"radeonkmsfw_R420_cp"
+#define FIRMWARE_RS690		"radeonkmsfw_RS690_cp"
+#define FIRMWARE_RS600		"radeonkmsfw_RS600_cp"
+#define FIRMWARE_R520		"radeonkmsfw_R520_cp"
+
+#ifdef __linux__
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+#endif
+
+static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
+
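+/* The ring read pointer lives either in an AGP-mapped region (accessed
+ * through DRM_READ32) or in little-endian system memory, where it is read
+ * directly and byte-swapped as needed on big-endian hosts.
+ */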
+u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
+{
+	u32 val;
+
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		val = DRM_READ32(dev_priv->ring_rptr, off);
+	} else {
+		val = *(((volatile u32 *)
+			 dev_priv->ring_rptr->handle) +
+			(off / sizeof(u32)));
+		val = le32_to_cpu(val);
+	}
+	return val;
+}
+
+u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv)
+{
+	if (dev_priv->writeback_works)
+		return radeon_read_ring_rptr(dev_priv, 0);
+	else {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return RADEON_READ(R600_CP_RB_RPTR);
+		else
+			return RADEON_READ(RADEON_CP_RB_RPTR);
+	}
+}
+
+void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
+{
+	if (dev_priv->flags & RADEON_IS_AGP)
+		DRM_WRITE32(dev_priv->ring_rptr, off, val);
+	else
+		*(((volatile u32 *) dev_priv->ring_rptr->handle) +
+		  (off / sizeof(u32))) = cpu_to_le32(val);
+}
+
+void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val)
+{
+	radeon_write_ring_rptr(dev_priv, 0, val);
+}
+
+u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
+{
+	if (dev_priv->writeback_works) {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return radeon_read_ring_rptr(dev_priv,
+						     R600_SCRATCHOFF(index));
+		else
+			return radeon_read_ring_rptr(dev_priv,
+						     RADEON_SCRATCHOFF(index));
+	} else {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return RADEON_READ(R600_SCRATCH_REG0 + 4*index);
+		else
+			return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index);
+	}
+}
+
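+/* Memory-controller registers on these chips are accessed indirectly:
+ * write the register address to the INDEX register, then transfer the
+ * value through the paired DATA register.
+ */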
+static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+	ret = RADEON_READ(R520_MC_IND_DATA);
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);
+	return ret;
+}
+
+static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
+	ret = RADEON_READ(RS480_NB_MC_DATA);
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
+	return ret;
+}
+
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+	ret = RADEON_READ(RS690_MC_DATA);
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+	return ret;
+}
+
+static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) |
+				      RS600_MC_IND_CITF_ARB0));
+	ret = RADEON_READ(RS600_MC_DATA);
+	return ret;
+}
+
+static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		return RS690_READ_MCIND(dev_priv, addr);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		return RS600_READ_MCIND(dev_priv, addr);
+	else
+		return RS480_READ_MCIND(dev_priv, addr);
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		return RADEON_READ(R700_MC_VM_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return RADEON_READ(R600_MC_VM_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+	else
+		return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+	else
+		RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+	/* R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+		RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+		RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+	else
+		RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
+void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+	u32 agp_base_hi = upper_32_bits(agp_base);
+	u32 agp_base_lo = agp_base & 0xffffffff;
+	u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
+
+	/* R6xx/R7xx must be aligned to a 4MB boundary */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo);
+		RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
+	} else {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+			RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+	}
+}
+
+void radeon_enable_bm(struct drm_radeon_private *dev_priv)
+{
+	u32 tmp;
+	/* Turn on bus mastering */
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		/* rs600/rs690/rs740 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} /* PCIE cards appear not to need this */
+}
+
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
+}
+
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+{
+	RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
+	return RADEON_READ(RADEON_PCIE_DATA);
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status(drm_radeon_private_t * dev_priv)
+{
+	printk("%s:\n", __func__);
+	printk("RBBM_STATUS = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
+	printk("CP_RB_RTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
+	printk("CP_RB_WTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
+	printk("AIC_CNTL = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
+	printk("AIC_STAT = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_STAT));
+	printk("AIC_PT_BASE = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
+	printk("TLB_ADDR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
+	printk("TLB_DATA = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
+		tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
+		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+		RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+		for (i = 0; i < dev_priv->usec_timeout; i++) {
+			if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
+			      & RADEON_RB3D_DC_BUSY)) {
+				return 0;
+			}
+			DRM_UDELAY(1);
+		}
+	} else {
+		/* don't flush or purge the cache here or the chip will lock up */
+		return 0;
+	}
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
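+/* Poll RBBM_STATUS until at least 'entries' command FIFO slots are free,
+ * giving up after dev_priv->usec_timeout microseconds.
+ */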
+static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
+{
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots = (RADEON_READ(RADEON_RBBM_STATUS)
+			     & RADEON_RBBM_FIFOCNT_MASK);
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
+{
+	int i, ret;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	ret = radeon_do_wait_for_fifo(dev_priv, 64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(RADEON_READ(RADEON_RBBM_STATUS)
+		      & RADEON_RBBM_ACTIVE)) {
+			radeon_do_pixcache_flush(dev_priv);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static void radeon_init_pipes(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t gb_tile_config, gb_pipe_sel = 0;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
+		uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
+		if ((z_pipe_sel & 3) == 3)
+			dev_priv->num_z_pipes = 2;
+		else
+			dev_priv->num_z_pipes = 1;
+	} else
+		dev_priv->num_z_pipes = 1;
+
+	/* RS4xx/RS6xx/R4xx/R5xx */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
+		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
+		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+		/* SE cards have 1 pipe */
+		if ((dev->pci_device == 0x5e4c) ||
+		    (dev->pci_device == 0x5e4f))
+			dev_priv->num_gb_pipes = 1;
+	} else {
+		/* R3xx */
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+		     dev->pci_device != 0x4144) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+		     dev->pci_device != 0x4148)) {
+			dev_priv->num_gb_pipes = 2;
+		} else {
+			/* RV3xx/R300 AD/R350 AH */
+			dev_priv->num_gb_pipes = 1;
+		}
+	}
+	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
+
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
+
+	switch (dev_priv->num_gb_pipes) {
+	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
+	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
+	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
+	default:
+	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
+		RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
+	}
+	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
+	radeon_do_wait_for_idle(dev_priv);
+	RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
+	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
+					       R300_DC_AUTOFLUSH_ENABLE |
+					       R300_DC_DC_DISABLE_IGNORE_PE));
+
+}
+
+/* ================================================================
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+	const char *fw_name = NULL;
+	int err;
+
+	DRM_DEBUG("\n");
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		fw_name = FIRMWARE_R100;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		fw_name = FIRMWARE_R200;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		fw_name = FIRMWARE_R300;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		fw_name = FIRMWARE_R420;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
+		fw_name = FIRMWARE_RS690;
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		DRM_INFO("Loading RS600 Microcode\n");
+		fw_name = FIRMWARE_RS600;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		fw_name = FIRMWARE_R520;
+	}
+
+	err = 0;
+
+	dev_priv->me_fw = firmware_get(fw_name);
+	if (dev_priv->me_fw == NULL) {
+		err = -ENOENT;
+		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
+		       fw_name);
+	} else if (dev_priv->me_fw->datasize % 8) {
+		DRM_ERROR(
+		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->me_fw->datasize, fw_name);
+		err = -EINVAL;
+		firmware_put(dev_priv->me_fw, FIRMWARE_UNLOAD);
+		dev_priv->me_fw = NULL;
+	}
+	return err;
+}
+
+static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i, size;
+
+	radeon_do_wait_for_idle(dev_priv);
+
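+	/* The CP microcode is stored as big-endian 32-bit words and is
+	 * streamed into ME RAM as (high, low) pairs through the DATAH/DATAL
+	 * ports.
+	 */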
+	if (dev_priv->me_fw) {
+		size = dev_priv->me_fw->datasize / 4;
+		fw_data = (const __be32 *)dev_priv->me_fw->data;
+		RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
+		for (i = 0; i < size; i += 2) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     be32_to_cpup(&fw_data[i]));
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     be32_to_cpup(&fw_data[i + 1]));
+		}
+	}
+}
+
+/* Flush any pending commands to the CP.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
+{
+	DRM_DEBUG("\n");
+#if 0
+	u32 tmp;
+
+	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1U << 31);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
+#endif
+}
+
+/* Wait for the CP to go idle.
+ */
+int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return radeon_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
+
+	dev_priv->cp_running = 1;
+
+	/* on r420, any DMA from CP to system memory while 2D is active
+	 * can cause a hang.  The workaround is to queue a CP RESYNC token.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+		BEGIN_RING(3);
+		OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
+		OUT_RING(5); /* scratch reg 5 */
+		OUT_RING(0xdeadbeef);
+		ADVANCE_RING();
+		COMMIT_RING();
+	}
+
+	BEGIN_RING(8);
+	/* isync can only be written through the cp on r5xx; write it here */
+	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+		 RADEON_ISYNC_ANY3D_IDLE2D |
+		 RADEON_ISYNC_WAIT_IDLEGUI |
+		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/* Reset the Command Processor.  This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
+{
+	u32 cur_read_ptr;
+	DRM_DEBUG("\n");
+
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+}
+
+/* Stop the Command Processor.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* finish the pending CP_RESYNC token */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+		OUT_RING(R300_RB3D_DC_FINISH);
+		ADVANCE_RING();
+		COMMIT_RING();
+		radeon_do_wait_for_idle(dev_priv);
+	}
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
+
+	dev_priv->cp_running = 0;
+}
+
+/* Reset the engine.  This will stop the CP if it is running.
+ */
+static int radeon_do_engine_reset(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
+	DRM_DEBUG("\n");
+
+	radeon_do_pixcache_flush(dev_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		/* may need something similar for newer chips */
+		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+						    RADEON_FORCEON_MCLKA |
+						    RADEON_FORCEON_MCLKB |
+						    RADEON_FORCEON_YCLKA |
+						    RADEON_FORCEON_YCLKB |
+						    RADEON_FORCEON_MC |
+						    RADEON_FORCEON_AIC));
+	}
+
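+	/* Pulse the soft-reset bits for the engine blocks: set them, read the
+	 * register back to post the write, clear them, and read back again.
+	 */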
+	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+					      RADEON_SOFT_RESET_CP |
+					      RADEON_SOFT_RESET_HI |
+					      RADEON_SOFT_RESET_SE |
+					      RADEON_SOFT_RESET_RE |
+					      RADEON_SOFT_RESET_PP |
+					      RADEON_SOFT_RESET_E2 |
+					      RADEON_SOFT_RESET_RB));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+					      ~(RADEON_SOFT_RESET_CP |
+						RADEON_SOFT_RESET_HI |
+						RADEON_SOFT_RESET_SE |
+						RADEON_SOFT_RESET_RE |
+						RADEON_SOFT_RESET_PP |
+						RADEON_SOFT_RESET_E2 |
+						RADEON_SOFT_RESET_RB)));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+	}
+
+	/* setup the raster pipes */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
+	    radeon_init_pipes(dev);
+
+	/* Reset the CP ring */
+	radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	radeon_freelist_reset(dev);
+
+	return 0;
+}
+
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
+				       drm_radeon_private_t *dev_priv,
+				       struct drm_file *file_priv)
+{
+	struct drm_radeon_master_private *master_priv;
+	u32 ring_start, cur_read_ptr;
+
+	/* Initialize the memory controller. With the new memory map, the fb
+	 * location is not changed; it should have been properly initialized
+	 * already. Part of the problem is that the code below is bogus: it
+	 * assumes the GART is always appended to the fb, which is not
+	 * necessarily the case.
+	 */
+	if (!dev_priv->new_memmap)
+		radeon_write_fb_location(dev_priv,
+			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
+			     | (dev_priv->fb_location >> 16));
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		radeon_write_agp_base(dev_priv, dev->agp->base);
+
+		radeon_write_agp_location(dev_priv,
+			     (((dev_priv->gart_vm_start - 1 +
+				dev_priv->gart_size) & 0xffff0000) |
+			      (dev_priv->gart_vm_start >> 16)));
+
+		ring_start = (dev_priv->cp_ring->offset
+			      - dev->agp->base
+			      + dev_priv->gart_vm_start);
+	} else
+#endif
+		ring_start = (dev_priv->cp_ring->offset
+			      - (unsigned long)dev->sg->vaddr
+			      + dev_priv->gart_vm_start);
+
+	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
+
+	/* Set the write pointer delay */
+	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+			     dev_priv->ring_rptr->offset
+			     - dev->agp->base + dev_priv->gart_vm_start);
+	} else
+#endif
+	{
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+			     dev_priv->ring_rptr->offset
+			     - ((unsigned long) dev->sg->vaddr)
+			     + dev_priv->gart_vm_start);
+	}
+
+	/* Set ring buffer size */
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     RADEON_BUF_SWAP_32BIT |
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	/* Initialize the scratch register pointer.  This will cause
+	 * the scratch register values to be written out to memory
+	 * whenever they are updated.
+	 *
+	 * We simply put this behind the ring read pointer; this works
+	 * with PCI GART as well as (whatever kind of) AGP GART.
+	 */
+	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+		     + RADEON_SCRATCH_REG_OFFSET);
+
+	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
+
+	radeon_enable_bm(dev_priv);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
+	RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+	RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
+	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
+
+	/* reset sarea copies of these */
+	master_priv = file_priv->master->driver_priv;
+	if (master_priv->sarea_priv) {
+		master_priv->sarea_priv->last_frame = 0;
+		master_priv->sarea_priv->last_dispatch = 0;
+		master_priv->sarea_priv->last_clear = 0;
+	}
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	/* Sync everything up */
+	RADEON_WRITE(RADEON_ISYNC_CNTL,
+		     (RADEON_ISYNC_ANY2D_IDLE3D |
+		      RADEON_ISYNC_ANY3D_IDLE2D |
+		      RADEON_ISYNC_WAIT_IDLEGUI |
+		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+
+}
+
+static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+
+	/* Start by assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere; test it here and only
+	 * enable it if it appears to work.
+	 */
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+
+	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
+	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+		u32 val;
+
+		val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
+		if (val == 0xdeadbeef)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (tmp < dev_priv->usec_timeout) {
+		dev_priv->writeback_works = 1;
+		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+	} else {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback test failed\n");
+	}
+	if (radeon_no_wb == 1) {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback forced off\n");
+	}
+
+	if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfer */
+		RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
+			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
+	}
+}
+
+/* Enable or disable IGP GART on the chip */
+static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 temp;
+
+	if (on) {
+		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
+							     RS690_BLOCK_GFX_D3_EN));
+		else
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
+		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
+							RS480_TLB_ENABLE |
+							RS480_GTW_LAC_EN |
+							RS480_1LEVEL_GART));
+
+		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
+		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
+		IGP_WRITE_MCIND(RS480_GART_BASE, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
+		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
+						      RS480_REQ_TYPE_SNOOP_DIS));
+
+		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
+
+		dev_priv->gart_size = 32*1024*1024;
+		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+			 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+
+		radeon_write_agp_location(dev_priv, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
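+		/* Wait for any pending invalidate to finish, kick off a new
+		 * one, wait for it to complete, then clear the control
+		 * register.  Note that these polls have no timeout.
+		 */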
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
+				RS480_GART_CACHE_INVALIDATE);
+
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
+	} else {
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+	}
+}
+
+/* Enable or disable IGP GART on the chip */
+static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
+{
+	u32 temp;
+	int i;
+
+	if (on) {
+		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+			 dev_priv->gart_vm_start,
+			 (long)dev_priv->gart_info.bus_addr,
+			 dev_priv->gart_size);
+
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
+						    RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
+		for (i = 0; i < 19; i++)
+			IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
+					(RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
+					 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
+					 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
+					 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
+					 RS600_ENABLE_FRAGMENT_PROCESSING |
+					 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
+							     RS600_PAGE_TABLE_TYPE_FLAT));
+
+		/* disable all other contexts */
+		for (i = 1; i < 8; i++)
+			IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+		/* setup the page table aperture */
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+				dev_priv->gart_info.bus_addr);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
+				dev_priv->gart_vm_start);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
+				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+		/* setup the system aperture */
+		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
+				dev_priv->gart_vm_start);
+		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
+				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+
+		/* enable page tables */
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));
+
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+		IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));
+
+		/* invalidate the cache */
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+	} else {
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+		temp &= ~RS600_ENABLE_PAGE_TABLES;
+		IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
+	}
+}
+
+static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+	if (on) {
+
+		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
+				  dev_priv->gart_info.bus_addr);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
+				  dev_priv->gart_vm_start +
+				  dev_priv->gart_size - 1);
+
+		radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
+
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  RADEON_PCIE_TX_GART_EN);
+	} else {
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  tmp & ~RADEON_PCIE_TX_GART_EN);
+	}
+}
+
+/* Enable or disable PCI GART on the chip */
+static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
+	    (dev_priv->flags & RADEON_IS_IGPGART)) {
+		radeon_set_igpgart(dev_priv, on);
+		return;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		rs600_set_igpgart(dev_priv, on);
+		return;
+	}
+
+	if (dev_priv->flags & RADEON_IS_PCIE) {
+		radeon_set_pciegart(dev_priv, on);
+		return;
+	}
+
+	tmp = RADEON_READ(RADEON_AIC_CNTL);
+
+	if (on) {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp | RADEON_PCIGART_TRANSLATE_EN);
+
+		/* set PCI GART page-table base address
+		 */
+		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
+
+		/* set address range for PCI address translate
+		 */
+		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
+		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
+			     + dev_priv->gart_size - 1);
+
+		/* Turn off AGP aperture -- is this required for PCI GART?
+		 */
+		radeon_write_agp_location(dev_priv, 0xffffffc0);
+		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
+	} else {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	}
+}
+
+static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv)
+{
+	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+	struct radeon_virt_surface *vp;
+	int i;
+
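+	/* Reserve a free virtual-surface slot and a free hardware surface
+	 * covering the GART table; no surface flags are set.
+	 */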
+	for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) {
+		if (!dev_priv->virt_surfaces[i].file_priv ||
+		    dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV)
+			break;
+	}
+	if (i >= 2 * RADEON_MAX_SURFACES)
+		return -ENOMEM;
+	vp = &dev_priv->virt_surfaces[i];
+
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		struct radeon_surface *sp = &dev_priv->surfaces[i];
+		if (sp->refcount)
+			continue;
+
+		vp->surface_index = i;
+		vp->lower = gart_info->bus_addr;
+		vp->upper = vp->lower + gart_info->table_size;
+		vp->flags = 0;
+		vp->file_priv = PCIGART_FILE_PRIV;
+
+		sp->refcount = 1;
+		sp->lower = vp->lower;
+		sp->upper = vp->upper;
+		sp->flags = 0;
+
+		RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags);
+		RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower);
+		RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper);
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+			     struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+	DRM_DEBUG("\n");
+
+	/* if we require the new memory map but don't have it, fail */
+	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+		DRM_DEBUG("Forcing AGP card to PCI mode\n");
+		dev_priv->flags &= ~RADEON_IS_AGP;
+	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+		   && !init->is_pci) {
+		DRM_DEBUG("Restoring AGP flag\n");
+		dev_priv->flags |= RADEON_IS_AGP;
+	}
+
+	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
+		DRM_ERROR("PCI GART memory not allocated!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
+	switch(init->func) {
+	case RADEON_INIT_R200_CP:
+		dev_priv->microcode_version = UCODE_R200;
+		break;
+	case RADEON_INIT_R300_CP:
+		dev_priv->microcode_version = UCODE_R300;
+		break;
+	default:
+		dev_priv->microcode_version = UCODE_R100;
+	}
+
+	dev_priv->do_boxes = 0;
+	dev_priv->cp_mode = init->cp_mode;
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	switch (init->depth_bpp) {
+	case 16:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+		break;
+	case 32:
+	default:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+		break;
+	}
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	/* Hardware state for depth clears.  Remove this if/when we no
+	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
+	 * all values to prevent unwanted 3D state from slipping through
+	 * and screwing with the clear operation.
+	 */
+	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+					   (dev_priv->color_fmt << 10) |
+					   (dev_priv->microcode_version ==
+					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+
+	dev_priv->depth_clear.rb3d_zstencilcntl =
+	    (dev_priv->depth_fmt |
+	     RADEON_Z_TEST_ALWAYS |
+	     RADEON_STENCIL_TEST_ALWAYS |
+	     RADEON_STENCIL_S_FAIL_REPLACE |
+	     RADEON_STENCIL_ZPASS_REPLACE |
+	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
+
+	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+					 RADEON_BFACE_SOLID |
+					 RADEON_FFACE_SOLID |
+					 RADEON_FLAT_SHADE_VTX_LAST |
+					 RADEON_DIFFUSE_SHADE_FLAT |
+					 RADEON_ALPHA_SHADE_FLAT |
+					 RADEON_SPECULAR_SHADE_FLAT |
+					 RADEON_FOG_SHADE_FLAT |
+					 RADEON_VTX_PIX_CENTER_OGL |
+					 RADEON_ROUND_MODE_TRUNC |
+					 RADEON_ROUND_PREC_8TH_PIX);
+
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+	master_priv->sarea = drm_getsarea(dev);
+	if (!master_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cp_ring) {
+		DRM_ERROR("could not find cp ring region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->gart_textures_offset) {
+		dev_priv->gart_textures =
+		    drm_core_findmap(dev, init->gart_textures_offset);
+		if (!dev_priv->gart_textures) {
+			DRM_ERROR("could not find GART texture region!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	}
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		if (!dev_priv->cp_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("could not find ioremap agp regions!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	} else
+#endif
+	{
+		dev_priv->cp_ring->handle =
+			(void *)(unsigned long)dev_priv->cp_ring->offset;
+		dev_priv->ring_rptr->handle =
+			(void *)(unsigned long)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+			(void *)(unsigned long)dev->agp_buffer_map->offset;
+
+		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+			  dev_priv->cp_ring->handle);
+		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+			  dev_priv->ring_rptr->handle);
+		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+			  dev->agp_buffer_map->handle);
+	}
+
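+	/* MC_FB_LOCATION packs the framebuffer aperture bottom in its low 16
+	 * bits and the top in its high 16 bits, both in 64KB units; convert
+	 * them to byte addresses here.
+	 */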
+	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+	dev_priv->fb_size =
+		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
+		- dev_priv->fb_location;
+
+	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+					((dev_priv->front_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+				       ((dev_priv->back_offset
+					 + dev_priv->fb_location) >> 10));
+
+	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+					((dev_priv->depth_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+	if (dev_priv->new_memmap) {
+		u32 base = 0;
+
+		DRM_INFO("Setting GART location based on new memory map\n");
+
+		/* If using AGP, try to locate the AGP aperture at the same
+		 * location in the card and on the bus, though we have to
+		 * align it down.
+		 */
+#if __OS_HAS_AGP
+		if (dev_priv->flags & RADEON_IS_AGP) {
+			base = dev->agp->base;
+			/* Check if valid */
+			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+					 dev->agp->base);
+				base = 0;
+			}
+		}
+#endif
+		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+		if (base == 0) {
+			base = dev_priv->fb_location + dev_priv->fb_size;
+			if (base < dev_priv->fb_location ||
+			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+				base = dev_priv->fb_location
+					- dev_priv->gart_size;
+		}
+		dev_priv->gart_vm_start = base & 0xffc00000u;
+		if (dev_priv->gart_vm_start != base)
+			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+				 base, dev_priv->gart_vm_start);
+	} else {
+		DRM_INFO("Setting GART location based on old memory map\n");
+		dev_priv->gart_vm_start = dev_priv->fb_location +
+			RADEON_READ(RADEON_CONFIG_APER_SIZE);
+	}
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP)
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - dev->agp->base
+						 + dev_priv->gart_vm_start);
+	else
+#endif
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+					- (unsigned long)dev->sg->vaddr
+					+ dev_priv->gart_vm_start);
+
+	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
+	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
+		  dev_priv->gart_buffers_offset);
+
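+	/* Ring geometry is programmed as log2 values: the ring size and rptr
+	 * update interval in quadwords, the fetch size in octwords.
+	 */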
+	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		u32 sctrl;
+		int ret;
+
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* if we have an offset set from userspace */
+		if (dev_priv->pcigart_offset_set) {
+			dev_priv->gart_info.bus_addr =
+				(resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
+			dev_priv->gart_info.mapping.offset =
+			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+			dev_priv->gart_info.mapping.size =
+			    dev_priv->gart_info.table_size;
+
+			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr =
+			    dev_priv->gart_info.mapping.handle;
+
+			if (dev_priv->flags & RADEON_IS_PCIE)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_FB;
+
+			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+				  dev_priv->gart_info.addr,
+				  dev_priv->pcigart_offset);
+		} else {
+			if (dev_priv->flags & RADEON_IS_IGPGART)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_MAIN;
+			dev_priv->gart_info.addr = NULL;
+			dev_priv->gart_info.bus_addr = 0;
+			if (dev_priv->flags & RADEON_IS_PCIE) {
+				DRM_ERROR
+				    ("Cannot use PCI Express without GART in FB memory\n");
+				radeon_do_cleanup_cp(dev);
+				return -EINVAL;
+			}
+		}
+
+		sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
+		RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+			ret = r600_page_table_init(dev);
+		else
+			ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
+		RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);
+
+		if (!ret) {
+			DRM_ERROR("failed to init PCI GART!\n");
+			radeon_do_cleanup_cp(dev);
+			return -ENOMEM;
+		}
+
+		ret = radeon_setup_pcigart_surface(dev_priv);
+		if (ret) {
+			DRM_ERROR("failed to setup GART surface!\n");
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+				r600_page_table_cleanup(dev, &dev_priv->gart_info);
+			else
+				drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
+			radeon_do_cleanup_cp(dev);
+			return ret;
+		}
+
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	if (!dev_priv->me_fw) {
+		int err = radeon_cp_init_microcode(dev_priv);
+		if (err) {
+			DRM_ERROR("Failed to load firmware!\n");
+			radeon_do_cleanup_cp(dev);
+			return err;
+		}
+	}
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->last_buf = 0;
+
+	radeon_do_engine_reset(dev);
+	radeon_test_writeback(dev_priv);
+
+	return 0;
+}
+
+static int radeon_do_cleanup_cp(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here, because the uninstall ioctl
+	 * may not have been called from userspace, and after dev_private is
+	 * freed it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		if (dev_priv->cp_ring != NULL) {
+			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			dev_priv->cp_ring = NULL;
+		}
+		if (dev_priv->ring_rptr != NULL) {
+			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			dev_priv->ring_rptr = NULL;
+		}
+		if (dev->agp_buffer_map != NULL) {
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			dev->agp_buffer_map = NULL;
+		}
+	} else
+#endif
+	{
+
+		if (dev_priv->gart_info.bus_addr) {
+			/* Turn off PCI GART */
+			radeon_set_pcigart(dev_priv, 0);
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+				r600_page_table_cleanup(dev, &dev_priv->gart_info);
+			else {
+				if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
+					DRM_ERROR("failed to cleanup PCI GART!\n");
+			}
+		}
+
+		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+		{
+			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr = NULL;
+		}
+	}
+	/* only clear to the start of flags */
+	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+	return 0;
+}
+
+/* This code will reinit the Radeon CP hardware after a resume from disc.
+ * AFAIK, it would be very difficult to pickle the state at suspend time, so
+ * here we make sure that all Radeon hardware initialisation is re-done without
+ * affecting running applications.
+ *
+ * Charl P. Botha <http://cpbotha.net>
+ */
+static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_ERROR("Called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("Starting radeon_do_resume_cp()\n");
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->have_z_offset = 0;
+	radeon_do_engine_reset(dev);
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	DRM_DEBUG("radeon_do_resume_cp() complete\n");
+
+	return 0;
+}
+
+int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_init_t *init = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (init->func == RADEON_INIT_R300_CP)
+		r300_init_reg_flags(dev);
+
+	switch (init->func) {
+	case RADEON_INIT_CP:
+	case RADEON_INIT_R200_CP:
+	case RADEON_INIT_R300_CP:
+		return radeon_do_init_cp(dev, init, file_priv);
+	case RADEON_INIT_R600_CP:
+		return r600_do_init_cp(dev, init, file_priv);
+	case RADEON_CLEANUP_CP:
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return r600_do_cleanup_cp(dev);
+		else
+			return radeon_do_cleanup_cp(dev);
+	}
+
+	return -EINVAL;
+}
+
+int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dev_priv->cp_running) {
+		DRM_DEBUG("while CP running\n");
+		return 0;
+	}
+	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
+		DRM_DEBUG("called with bogus CP mode (%d)\n",
+			  dev_priv->cp_mode);
+		return 0;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_start(dev_priv);
+	else
+		radeon_do_cp_start(dev_priv);
+
+	return 0;
+}
+
+/* Stop the CP.  The engine must have been idled before calling this
+ * routine.
+ */
+int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_cp_stop_t *stop = data;
+	int ret;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->cp_running)
+		return 0;
+
+	/* Flush any pending CP commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if (stop->flush) {
+		radeon_do_cp_flush(dev_priv);
+	}
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if (stop->idle) {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			ret = r600_do_cp_idle(dev_priv);
+		else
+			ret = radeon_do_cp_idle(dev_priv);
+		if (ret)
+			return ret;
+	}
+
+	/* Finally, we can turn off the CP.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CP is shut down.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_stop(dev_priv);
+	else
+		radeon_do_cp_stop(dev_priv);
+
+	/* Reset the engine */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_engine_reset(dev);
+	else
+		radeon_do_engine_reset(dev);
+
+	return 0;
+}
+
+void radeon_do_release(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if (dev_priv) {
+		if (dev_priv->cp_running) {
+			/* Stop the cp */
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+				while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
+					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+					schedule();
+#else
+					tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+				}
+			} else {
+				while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
+					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+					schedule();
+#else
+					tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+				}
+			}
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+				r600_do_cp_stop(dev_priv);
+				r600_do_engine_reset(dev);
+			} else {
+				radeon_do_cp_stop(dev_priv);
+				radeon_do_engine_reset(dev);
+			}
+		}
+
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
+			/* Disable *all* interrupts */
+			if (dev_priv->mmio)	/* remove this after permanent addmaps */
+				RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+			if (dev_priv->mmio) {	/* remove all surfaces */
+				for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+					RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
+					RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
+						     16 * i, 0);
+					RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
+						     16 * i, 0);
+				}
+			}
+		}
+
+		/* Free memory heap structures */
+		radeon_mem_takedown(&(dev_priv->gart_heap));
+		radeon_mem_takedown(&(dev_priv->fb_heap));
+
+		/* deallocate kernel resources */
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			r600_do_cleanup_cp(dev);
+		else
+			radeon_do_cleanup_cp(dev);
+		if (dev_priv->me_fw != NULL) {
+			firmware_put(dev_priv->me_fw, FIRMWARE_UNLOAD);
+			dev_priv->me_fw = NULL;
+		}
+		if (dev_priv->pfp_fw != NULL) {
+			firmware_put(dev_priv->pfp_fw, FIRMWARE_UNLOAD);
+			dev_priv->pfp_fw = NULL;
+		}
+	}
+}
+
+/* Just reset the CP ring.  Called as part of an X Server engine reset.
+ */
+int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_DEBUG("called before init done\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_reset(dev_priv);
+	else
+		radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	return 0;
+}
+
+int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_cp_idle(dev_priv);
+	else
+		return radeon_do_cp_idle(dev_priv);
+}
+
+/* Added by Charl P. Botha to call radeon_do_resume_cp().
+ */
+int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_resume_cp(dev, file_priv);
+	else
+		return radeon_do_resume_cp(dev, file_priv);
+}
+
+int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_engine_reset(dev);
+	else
+		return radeon_do_engine_reset(dev);
+}
+
+/* ================================================================
+ * Fullscreen mode
+ */
+
+/* KW: Deprecated to say the least:
+ */
+int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
+ *   bufs until freelist code is used.  Note this hides a problem with
+ *   the scratch register (used to keep track of the last buffer
+ *   completed) being written to before the last buffer has actually
+ *   completed rendering.
+ *
+ * KW:  It's also a good way to find free buffers quickly.
+ *
+ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
+ * sleep.  However, bugs in older versions of radeon_accel.c mean that
+ * we essentially have to do this, else old clients will break.
+ *
+ * However, it does leave open a potential deadlock where all the
+ * buffers are held by other clients, which can't release them because
+ * they can't get the lock.
+ */
+
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+	int start;
+
+	if (++dev_priv->last_buf >= dma->buf_count)
+		dev_priv->last_buf = 0;
+
+	start = dev_priv->last_buf;
+
+	for (t = 0; t < dev_priv->usec_timeout; t++) {
+		u32 done_age = GET_SCRATCH(dev_priv, 1);
+		DRM_DEBUG("done_age = %d\n", done_age);
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[start];
+			buf_priv = buf->dev_private;
+			if (buf->file_priv == NULL || (buf->pending &&
+						       buf_priv->age <=
+						       done_age)) {
+				dev_priv->stats.requested_bufs++;
+				buf->pending = 0;
+				return buf;
+			}
+			if (++start >= dma->buf_count)
+				start = 0;
+		}
+
+		if (t) {
+			DRM_UDELAY(1);
+			dev_priv->stats.freelist_loops++;
+		}
+	}
+
+	return NULL;
+}
+
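
The scan above is easier to see in isolation: starting just past the last buffer handed out, a buffer is reusable either when no client owns it or when it is pending and its age has been passed by the engine's done_age scratch value. A stand-alone user-space sketch of the same round-robin scan, with invented buffer states:

    #include <stdio.h>

    #define NBUFS 4

    struct buf { int owned, pending; unsigned age; };

    static int freelist_get(struct buf *b, int *last, unsigned done_age)
    {
        int i, idx = (*last + 1) % NBUFS;

        for (i = 0; i < NBUFS; i++, idx = (idx + 1) % NBUFS) {
            /* free if unowned, or pending and already aged out */
            if (!b[idx].owned ||
                (b[idx].pending && b[idx].age <= done_age)) {
                b[idx].pending = 0;
                *last = idx;
                return idx;
            }
        }
        return -1;  /* everything busy: the driver delays and rescans */
    }

    int main(void)
    {
        struct buf bufs[NBUFS] = {
            {1, 1, 5}, {1, 0, 0}, {1, 1, 9}, {0, 0, 0}
        };
        int last = 0;

        printf("got %d\n", freelist_get(bufs, &last, 7)); /* 3: unowned */
        printf("got %d\n", freelist_get(bufs, &last, 7)); /* 0: age 5 <= 7 */
        return 0;
    }
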
+void radeon_freelist_reset(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->last_buf = 0;
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+/* ================================================================
+ * CP command submission
+ */
+
+int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
+{
+	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+	u32 last_head = GET_RING_HEAD(dev_priv);
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		u32 head = GET_RING_HEAD(dev_priv);
+
+		ring->space = (head - ring->tail) * sizeof(u32);
+		if (ring->space <= 0)
+			ring->space += ring->size;
+		if (ring->space > n)
+			return 0;
+
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+		if (head != last_head)
+			i = 0;
+		last_head = head;
+
+		DRM_UDELAY(1);
+	}
+
+	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
+#if RADEON_FIFO_DEBUG
+	radeon_status(dev_priv);
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
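
The space computation above is the usual circular-buffer distance from tail forward to head, in bytes; note also that the timeout counter is reset whenever the head moves, so the wait only expires if the ring makes no progress at all. The wrap arithmetic with toy numbers:

    #include <stdio.h>

    int main(void)
    {
        int size = 256;                 /* ring size in bytes (64 dwords) */
        int head = 8, tail = 40;        /* dword indices: head has wrapped */
        int space = (head - tail) * 4;  /* * sizeof(u32) */

        if (space <= 0)
            space += size;              /* head is behind tail: add one lap */
        printf("space = %d bytes\n", space); /* (8 - 40) * 4 + 256 = 128 */
        return 0;
    }
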
+static int radeon_cp_get_buffers(struct drm_device *dev,
+				 struct drm_file *file_priv,
+				 struct drm_dma * d)
+{
+	int i;
+	struct drm_buf *buf;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = radeon_freelist_get(dev);
+		if (!buf)
+			return -EBUSY;	/* NOTE: broken client */
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+				     sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+				     sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int ret = 0;
+	struct drm_dma *d = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = radeon_cp_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	drm_radeon_private_t *dev_priv;
+	int ret = 0;
+
+	dev_priv = malloc(sizeof(drm_radeon_private_t),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->flags = flags;
+
+	switch (flags & RADEON_FAMILY_MASK) {
+	case CHIP_R100:
+	case CHIP_RV200:
+	case CHIP_R200:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV570:
+	case CHIP_R580:
+		dev_priv->flags |= RADEON_HAS_HIERZ;
+		break;
+	default:
+		/* all other chips have no hierarchical z buffer */
+		break;
+	}
+
+	pci_enable_busmaster(dev->dev);
+
+	if (drm_pci_device_is_agp(dev))
+		dev_priv->flags |= RADEON_IS_AGP;
+	else if (drm_pci_device_is_pcie(dev))
+		dev_priv->flags |= RADEON_IS_PCIE;
+	else
+		dev_priv->flags |= RADEON_IS_PCI;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
+			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
+	ret = drm_vblank_init(dev, 2);
+	if (ret) {
+		radeon_driver_unload(dev);
+		return ret;
+	}
+
+	DRM_DEBUG("%s card detected\n",
+		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
+	return ret;
+}
+
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv;
+	unsigned long sareapage;
+	int ret;
+
+	master_priv = malloc(sizeof(*master_priv),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!master_priv)
+		return -ENOMEM;
+
+	/* prebuild the SAREA */
+	sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+			 &master_priv->sarea);
+	if (ret) {
+		DRM_ERROR("SAREA setup failed\n");
+		free(master_priv, DRM_MEM_DRIVER);
+		return ret;
+	}
+	master_priv->sarea_priv = (drm_radeon_sarea_t *)((char *)master_priv->sarea->handle +
+	    sizeof(struct drm_sarea));
+	master_priv->sarea_priv->pfCurrentPage = 0;
+
+	master->driver_priv = master_priv;
+	return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+	if (!master_priv)
+		return;
+
+	if (master_priv->sarea_priv &&
+	    master_priv->sarea_priv->pfCurrentPage != 0)
+		radeon_cp_dispatch_flip(dev, master);
+
+	master_priv->sarea_priv = NULL;
+	if (master_priv->sarea)
+		drm_rmmap_locked(dev, master_priv->sarea);
+
+	free(master_priv, DRM_MEM_DRIVER);
+
+	master->driver_priv = NULL;
+}
+
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
+{
+	int ret;
+	drm_local_map_t *map;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+
+	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
+			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &map);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+int radeon_driver_unload(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	drm_rmmap(dev, dev_priv->mmio);
+
+	free(dev_priv, DRM_MEM_DRIVER);
+
+	dev->dev_private = NULL;
+	return 0;
+}
+
+void radeon_commit_ring(drm_radeon_private_t *dev_priv)
+{
+	int i;
+	u32 *ring;
+	int tail_aligned;
+
+	/* check if the ring is padded out to 16-dword alignment */
+
+	tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
+	if (tail_aligned) {
+		int num_p2 = RADEON_RING_ALIGN - tail_aligned;
+
+		ring = dev_priv->ring.start;
+		/* pad with some CP_PACKET2 */
+		for (i = 0; i < num_p2; i++)
+			ring[dev_priv->ring.tail + i] = CP_PACKET2();
+
+		dev_priv->ring.tail += i;
+
+		dev_priv->ring.space -= num_p2 * sizeof(u32);
+	}
+
+	dev_priv->ring.tail &= dev_priv->ring.tail_mask;
+
+	DRM_MEMORYBARRIER();
+	GET_RING_HEAD( dev_priv );
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail);
+		/* read from PCI bus to ensure correct posting */
+		RADEON_READ(R600_CP_RB_RPTR);
+	} else {
+		RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
+		/* read from PCI bus to ensure correct posting */
+		RADEON_READ(RADEON_CP_RB_RPTR);
+	}
+}
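
The padding step in radeon_commit_ring is plain modular arithmetic: if the tail is not on a 16-dword boundary, the gap up to the next boundary is filled with no-op packets before the write pointer is committed. A sketch with stand-in values for RADEON_RING_ALIGN and CP_PACKET2():

    #include <stdio.h>

    #define RING_ALIGN 16          /* stand-in for RADEON_RING_ALIGN */
    #define NOP 0x80000000u        /* stand-in for CP_PACKET2() */

    int main(void)
    {
        unsigned ring[64] = {0};
        unsigned tail = 37;        /* current tail, in dwords */
        unsigned misalign = tail & (RING_ALIGN - 1);

        if (misalign) {
            unsigned pad = RING_ALIGN - misalign, i;

            for (i = 0; i < pad; i++)
                ring[tail + i] = NOP;   /* pad with no-ops */
            tail += pad;
        }
        printf("tail now %u (multiple of %d)\n", tail, RING_ALIGN);
        return 0;
    }
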


Property changes on: trunk/sys/dev/drm2/radeon/radeon_cp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_cs.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_cs.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_cs.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,659 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse at freedesktop.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_cs.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt);
+
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+	struct drm_device *ddev = p->rdev->ddev;
+	struct radeon_cs_chunk *chunk;
+	unsigned i, j;
+	bool duplicate;
+
+	if (p->chunk_relocs_idx == -1) {
+		return 0;
+	}
+	chunk = &p->chunks[p->chunk_relocs_idx];
+	p->dma_reloc_idx = 0;
+	/* FIXME: we assume that each reloc uses 4 dwords */
+	p->nrelocs = chunk->length_dw / 4;
+	p->relocs_ptr = malloc(p->nrelocs * sizeof(void *),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (p->relocs_ptr == NULL) {
+		return -ENOMEM;
+	}
+	p->relocs = malloc(p->nrelocs * sizeof(struct radeon_cs_reloc),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (p->relocs == NULL) {
+		return -ENOMEM;
+	}
+	for (i = 0; i < p->nrelocs; i++) {
+		struct drm_radeon_cs_reloc *r;
+
+		duplicate = false;
+		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+		for (j = 0; j < i; j++) {
+			if (r->handle == p->relocs[j].handle) {
+				p->relocs_ptr[i] = &p->relocs[j];
+				duplicate = true;
+				break;
+			}
+		}
+		if (!duplicate) {
+			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
+								  p->filp,
+								  r->handle);
+			if (p->relocs[i].gobj == NULL) {
+				DRM_ERROR("gem object lookup failed 0x%x\n",
+					  r->handle);
+				return -ENOENT;
+			}
+			p->relocs_ptr[i] = &p->relocs[i];
+			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+			p->relocs[i].lobj.bo = p->relocs[i].robj;
+			p->relocs[i].lobj.wdomain = r->write_domain;
+			p->relocs[i].lobj.rdomain = r->read_domains;
+			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+			p->relocs[i].handle = r->handle;
+			p->relocs[i].flags = r->flags;
+			radeon_bo_list_add_object(&p->relocs[i].lobj,
+						  &p->validated);
+
+		} else
+			p->relocs[i].handle = 0;
+	}
+	return radeon_bo_list_validate(&p->validated);
+}
+
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+	p->priority = priority;
+
+	switch (ring) {
+	default:
+		DRM_ERROR("unknown ring id: %d\n", ring);
+		return -EINVAL;
+	case RADEON_CS_RING_GFX:
+		p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_COMPUTE:
+		if (p->rdev->family >= CHIP_TAHITI) {
+			if (p->priority > 0)
+				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
+		} else
+			p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_DMA:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			if (p->priority > 0)
+				p->ring = R600_RING_TYPE_DMA_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+		} else if (p->rdev->family >= CHIP_R600) {
+			p->ring = R600_RING_TYPE_DMA_INDEX;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	}
+	return 0;
+}
+
+static void radeon_cs_sync_to(struct radeon_cs_parser *p,
+			      struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = p->ib.sync_to[fence->ring];
+	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+	int i;
+
+	for (i = 0; i < p->nrelocs; i++) {
+		if (!p->relocs[i].robj)
+			continue;
+
+		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+	}
+}
+
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+	struct drm_radeon_cs *cs = data;
+	uint64_t *chunk_array_ptr;
+	unsigned size, i;
+	u32 ring = RADEON_CS_RING_GFX;
+	s32 priority = 0;
+
+	if (!cs->num_chunks) {
+		return 0;
+	}
+	/* get chunks */
+	INIT_LIST_HEAD(&p->validated);
+	p->idx = 0;
+	p->ib.sa_bo = NULL;
+	p->ib.semaphore = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->const_ib.semaphore = NULL;
+	p->chunk_ib_idx = -1;
+	p->chunk_relocs_idx = -1;
+	p->chunk_flags_idx = -1;
+	p->chunk_const_ib_idx = -1;
+	p->chunks_array = malloc(cs->num_chunks * sizeof(uint64_t),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (p->chunks_array == NULL) {
+		return -ENOMEM;
+	}
+	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+			       sizeof(uint64_t)*cs->num_chunks)) {
+		return -EFAULT;
+	}
+	p->cs_flags = 0;
+	p->nchunks = cs->num_chunks;
+	p->chunks = malloc(p->nchunks * sizeof(struct radeon_cs_chunk),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (p->chunks == NULL) {
+		return -ENOMEM;
+	}
+	for (i = 0; i < p->nchunks; i++) {
+		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+		struct drm_radeon_cs_chunk user_chunk;
+		uint32_t __user *cdata;
+
+		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
+		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+				       sizeof(struct drm_radeon_cs_chunk))) {
+			return -EFAULT;
+		}
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		p->chunks[i].kdata = NULL;
+		p->chunks[i].chunk_id = user_chunk.chunk_id;
+
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs_idx = i;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib_idx = i;
+			/* zero length IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib_idx = i;
+			/* zero length CONST IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags_idx = i;
+			/* zero length flags aren't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
+
+		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+			size = p->chunks[i].length_dw * sizeof(uint32_t);
+			p->chunks[i].kdata = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
+			if (p->chunks[i].kdata == NULL) {
+				return -ENOMEM;
+			}
+			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
+					       p->chunks[i].user_ptr, size)) {
+				return -EFAULT;
+			}
+			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+				p->cs_flags = p->chunks[i].kdata[0];
+				if (p->chunks[i].length_dw > 1)
+					ring = p->chunks[i].kdata[1];
+				if (p->chunks[i].length_dw > 2)
+					priority = (s32)p->chunks[i].kdata[2];
+			}
+		}
+	}
+
+	/* these are KMS only */
+	if (p->rdev) {
+		if ((p->cs_flags & RADEON_CS_USE_VM) &&
+		    !p->rdev->vm_manager.enabled) {
+			DRM_ERROR("VM not active on asic!\n");
+			return -EINVAL;
+		}
+
+		/* we only support VM on SI+ */
+		if ((p->rdev->family >= CHIP_TAHITI) &&
+		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+			DRM_ERROR("VM required on SI+!\n");
+			return -EINVAL;
+		}
+
+		if (radeon_cs_get_ring(p, ring, priority))
+			return -EINVAL;
+	}
+
+	/* deal with non-vm */
+	if ((p->chunk_ib_idx != -1) &&
+	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+			DRM_ERROR("cs IB too big: %d\n",
+				  p->chunks[p->chunk_ib_idx].length_dw);
+			return -EINVAL;
+		}
+		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
+			p->chunks[p->chunk_ib_idx].kpage[0] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
+			p->chunks[p->chunk_ib_idx].kpage[1] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
+			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+				free(p->chunks[p->chunk_ib_idx].kpage[0], DRM_MEM_DRIVER);
+				free(p->chunks[p->chunk_ib_idx].kpage[1], DRM_MEM_DRIVER);
+				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
+				return -ENOMEM;
+			}
+		}
+		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+		p->chunks[p->chunk_ib_idx].last_page_index =
+			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+	}
+
+	return 0;
+}
+
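
The chunk copy-in above is a two-level fetch: first an array of num_chunks user-space pointers, then one drm_radeon_cs_chunk header per pointer. A user-space sketch of the same shape, with memcpy standing in for DRM_COPY_FROM_USER and a simplified chunk struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct chunk { uint32_t chunk_id, length_dw; uint64_t chunk_data; };

    int main(void)
    {
        struct chunk user_chunks[2] = {
            { 1 /* e.g. RELOCS */, 8,  0 },
            { 2 /* e.g. IB */,     64, 0 },
        };
        uint64_t user_array[2] = {
            (uint64_t)(uintptr_t)&user_chunks[0],
            (uint64_t)(uintptr_t)&user_chunks[1],
        };
        uint64_t ptrs[2];
        unsigned i;

        memcpy(ptrs, user_array, sizeof(ptrs)); /* first copy: pointer array */
        for (i = 0; i < 2; i++) {
            struct chunk c;

            /* second copy: the chunk header itself */
            memcpy(&c, (void *)(uintptr_t)ptrs[i], sizeof(c));
            printf("chunk %u: id=%u length_dw=%u\n",
                   i, (unsigned)c.chunk_id, (unsigned)c.length_dw);
        }
        return 0;
    }
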
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+	unsigned i;
+
+	if (!error) {
+		ttm_eu_fence_buffer_objects(&parser->validated,
+					    parser->ib.fence);
+	} else {
+		ttm_eu_backoff_reservation(&parser->validated);
+	}
+
+	if (parser->relocs != NULL) {
+		for (i = 0; i < parser->nrelocs; i++) {
+			if (parser->relocs[i].gobj)
+				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+		}
+	}
+	free(parser->track, DRM_MEM_DRIVER);
+	free(parser->relocs, DRM_MEM_DRIVER);
+	free(parser->relocs_ptr, DRM_MEM_DRIVER);
+	for (i = 0; i < parser->nchunks; i++) {
+		free(parser->chunks[i].kdata, DRM_MEM_DRIVER);
+		if ((parser->rdev->flags & RADEON_IS_AGP)) {
+			free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER);
+			free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER);
+		}
+	}
+	free(parser->chunks, DRM_MEM_DRIVER);
+	free(parser->chunks_array, DRM_MEM_DRIVER);
+	radeon_ib_free(parser->rdev, &parser->ib);
+	radeon_ib_free(parser->rdev, &parser->const_ib);
+}
+
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+			      struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM)
+		return 0;
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	/* Copy the packet into the IB, the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached).
+	 */
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   NULL, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	r = radeon_cs_parse(rdev, parser->ring, parser);
+	if (r || parser->parser_error) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	r = radeon_cs_finish_pages(parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	radeon_cs_sync_rings(parser);
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+	if (r) {
+		DRM_ERROR("Failed to schedule IB !\n");
+	}
+	return r;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+				   struct radeon_vm *vm)
+{
+	struct radeon_device *rdev = parser->rdev;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
+	int r;
+
+	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+	if (r) {
+		return r;
+	}
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		bo = lobj->bo;
+		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+				 struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+		return 0;
+
+	if ((rdev->family >= CHIP_TAHITI) &&
+	    (parser->chunk_const_ib_idx != -1)) {
+		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+				   vm, ib_chunk->length_dw * 4);
+		if (r) {
+			DRM_ERROR("Failed to get const ib !\n");
+			return r;
+		}
+		parser->const_ib.is_const_ib = true;
+		parser->const_ib.length_dw = ib_chunk->length_dw;
+		/* Copy the packet into the IB */
+		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
+				       ib_chunk->length_dw * 4)) {
+			return -EFAULT;
+		}
+		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
+		if (r) {
+			return r;
+		}
+	}
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	/* Copy the packet into the IB */
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
+			       ib_chunk->length_dw * 4)) {
+		return -EFAULT;
+	}
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
+	if (r) {
+		return r;
+	}
+
+	sx_xlock(&rdev->vm_manager.lock);
+	sx_xlock(&vm->mutex);
+	r = radeon_vm_alloc_pt(rdev, vm);
+	if (r) {
+		goto out;
+	}
+	r = radeon_bo_vm_update_pte(parser, vm);
+	if (r) {
+		goto out;
+	}
+	radeon_cs_sync_rings(parser);
+	radeon_cs_sync_to(parser, vm->fence);
+	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+
+	if ((rdev->family >= CHIP_TAHITI) &&
+	    (parser->chunk_const_ib_idx != -1)) {
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+	} else {
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+	}
+
+	if (!r) {
+		radeon_vm_fence(rdev, vm, parser->ib.fence);
+	}
+
+out:
+	radeon_vm_add_to_lru(rdev, vm);
+	sx_xunlock(&vm->mutex);
+	sx_xunlock(&rdev->vm_manager.lock);
+	return r;
+}
+
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
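
The contract here is small but load-bearing: -EDEADLK from the parser is taken to mean the GPU wedged, and if the reset succeeds the error is rewritten to -EAGAIN so the ioctl path simply retries. A toy sketch of that rewrite:

    #include <errno.h>
    #include <stdio.h>

    static int gpu_reset_ok = 1;  /* pretend the reset succeeds */

    static int handle_lockup(int r)
    {
        if (r == -EDEADLK)
            r = gpu_reset_ok ? -EAGAIN : -EDEADLK;
        return r;
    }

    int main(void)
    {
        printf("%d -> %d\n", -EDEADLK, handle_lockup(-EDEADLK)); /* -EAGAIN */
        return 0;
    }
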
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_cs_parser parser;
+	int r;
+
+	sx_slock(&rdev->exclusive_lock);
+	if (!rdev->accel_working) {
+		sx_sunlock(&rdev->exclusive_lock);
+		return -EBUSY;
+	}
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+	parser.filp = filp;
+	parser.rdev = rdev;
+	parser.dev = rdev->dev;
+	parser.family = rdev->family;
+	r = radeon_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		radeon_cs_parser_fini(&parser, r);
+		sx_sunlock(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+	r = radeon_cs_parser_relocs(&parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to parse relocation %d!\n", r);
+		radeon_cs_parser_fini(&parser, r);
+		sx_sunlock(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+	r = radeon_cs_ib_chunk(rdev, &parser);
+	if (r) {
+		goto out;
+	}
+	r = radeon_cs_ib_vm_chunk(rdev, &parser);
+	if (r) {
+		goto out;
+	}
+out:
+	radeon_cs_parser_fini(&parser, r);
+	sx_sunlock(&rdev->exclusive_lock);
+	r = radeon_cs_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_cs_finish_pages(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	int i;
+	int size = PAGE_SIZE;
+
+	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
+		if (i == ibc->last_page_index) {
+			size = (ibc->length_dw * 4) % PAGE_SIZE;
+			if (size == 0)
+				size = PAGE_SIZE;
+		}
+		
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
+				       size))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+{
+	int new_page;
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	int i;
+	int size = PAGE_SIZE;
+	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+		false : true;
+
+	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
+				       PAGE_SIZE)) {
+			p->parser_error = -EFAULT;
+			return 0;
+		}
+	}
+
+	if (pg_idx == ibc->last_page_index) {
+		size = (ibc->length_dw * 4) % PAGE_SIZE;
+		if (size == 0)
+			size = PAGE_SIZE;
+	}
+
+	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+	if (copy1)
+		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
+	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
+			       (char *)ibc->user_ptr + (pg_idx * PAGE_SIZE),
+			       size)) {
+		p->parser_error = -EFAULT;
+		return 0;
+	}
+
+	/* copy into the IB for the non-single-copy (AGP) case */
+	if (!copy1)
+		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+
+	ibc->last_copied_page = pg_idx;
+	ibc->kpage_idx[new_page] = pg_idx;
+
+	return new_page;
+}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	u32 pg_idx, pg_offset;
+	u32 idx_value = 0;
+	int new_page;
+
+	pg_idx = (idx * 4) / PAGE_SIZE;
+	pg_offset = (idx * 4) % PAGE_SIZE;
+
+	if (ibc->kpage_idx[0] == pg_idx)
+		return ibc->kpage[0][pg_offset/4];
+	if (ibc->kpage_idx[1] == pg_idx)
+		return ibc->kpage[1][pg_offset/4];
+
+	new_page = radeon_cs_update_pages(p, pg_idx);
+	if (new_page < 0) {
+		p->parser_error = new_page;
+		return 0;
+	}
+
+	idx_value = ibc->kpage[new_page][pg_offset/4];
+	return idx_value;
+}
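
The lookup above splits a dword index into a page number and an offset, backed by the two-slot page cache that radeon_cs_update_pages refills by evicting the older of kpage[0]/kpage[1]. A stand-alone sketch of the index math and the eviction choice, assuming a 4 KiB page size:

    #include <stdio.h>

    #define PGSZ 4096

    int main(void)
    {
        int kpage_idx[2] = { -1, -1 };
        unsigned queries[3] = { 10, 2000, 100 };
        unsigned q;

        for (q = 0; q < 3; q++) {
            unsigned pg  = (queries[q] * 4) / PGSZ;  /* page of this dword */
            unsigned off = (queries[q] * 4) % PGSZ;  /* byte offset within */

            if (kpage_idx[0] != (int)pg && kpage_idx[1] != (int)pg) {
                /* miss: replace the older slot, as update_pages does */
                int slot = kpage_idx[0] < kpage_idx[1] ? 0 : 1;
                kpage_idx[slot] = pg;
                printf("dword %u -> page %u offset %u (miss, slot %d)\n",
                       queries[q], pg, off, slot);
            } else {
                printf("dword %u -> page %u offset %u (hit)\n",
                       queries[q], pg, off);
            }
        }
        return 0;
    }
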


Property changes on: trunk/sys/dev/drm2/radeon/radeon_cs.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_cursor.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_cursor.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_cursor.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,317 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_cursor.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	uint32_t cur_lock;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+		WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+		WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else {
+		cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= RADEON_CUR_LOCK;
+		else
+			cur_lock &= ~RADEON_CUR_LOCK;
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+	}
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		u32 reg;
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			reg = RADEON_CRTC_GEN_CNTL;
+			break;
+		case 1:
+			reg = RADEON_CRTC2_GEN_CNTL;
+			break;
+		default:
+			return;
+		}
+		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
+	}
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+			break;
+		case 1:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+			break;
+		default:
+			return;
+		}
+
+		WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+					  (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+			 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+	}
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+			      uint64_t gpu_addr)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(gpu_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       gpu_addr & 0xffffffff);
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+		}
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       gpu_addr & 0xffffffff);
+	} else {
+		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+	}
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+			   struct drm_file *file_priv,
+			   uint32_t handle,
+			   uint32_t width,
+			   uint32_t height)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
+	uint64_t gpu_addr;
+	int ret;
+
+	if (!handle) {
+		/* turn off cursor */
+		radeon_hide_cursor(crtc);
+		obj = NULL;
+		goto unpin;
+	}
+
+	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (unlikely(ret != 0))
+		goto fail;
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &gpu_addr);
+	radeon_bo_unreserve(robj);
+	if (ret)
+		goto fail;
+
+	radeon_crtc->cursor_width = width;
+	radeon_crtc->cursor_height = height;
+
+	radeon_lock_cursor(crtc, true);
+	radeon_set_cursor(crtc, obj, gpu_addr);
+	radeon_show_cursor(crtc);
+	radeon_lock_cursor(crtc, false);
+
+unpin:
+	if (radeon_crtc->cursor_bo) {
+		robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+		ret = radeon_bo_reserve(robj, false);
+		if (likely(ret == 0)) {
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
+		}
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+	}
+
+	radeon_crtc->cursor_bo = obj;
+	return 0;
+fail:
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+			    int x, int y)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	int w = radeon_crtc->cursor_width;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* avivo cursors are offset into the total surface */
+		x += crtc->x;
+		y += crtc->y;
+	}
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	if (x < 0) {
+		xorigin = min(-x, CURSOR_WIDTH - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, CURSOR_HEIGHT - 1);
+		y = 0;
+	}
+
+	/* fixed on DCE6 and newer */
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+		int i = 0;
+		struct drm_crtc *crtc_p;
+
+		/* avivo cursor image can't end on 128 pixel boundary or
+		 * go past the end of the frame if both crtcs are enabled
+		 */
+		list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+			if (crtc_p->enabled)
+				i++;
+		}
+		if (i > 1) {
+			int cursor_end, frame_end;
+
+			cursor_end = x - xorigin + w;
+			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+			if (cursor_end >= frame_end) {
+				w = w - (cursor_end - frame_end);
+				if (!(frame_end & 0x7f))
+					w--;
+			} else {
+				if (!(cursor_end & 0x7f))
+					w--;
+			}
+			if (w <= 0) {
+				w = 1;
+				cursor_end = x - xorigin + w;
+				if (!(cursor_end & 0x7f)) {
+					x--;
+					if (x < 0) {
+						DRM_ERROR("%s: x(%d) < 0", __func__, x);
+					}
+				}
+			}
+		}
+	}
+
+	radeon_lock_cursor(crtc, true);
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else {
+		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+			y *= 2;
+
+		WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (xorigin << 16)
+			| yorigin));
+		WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (x << 16)
+			| y));
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+								      (yorigin * 256)));
+	}
+	radeon_lock_cursor(crtc, false);
+
+	return 0;
+}
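
The pre-DCE6 quirk above reduces to a little arithmetic on the visible cursor width: with more than one CRTC enabled, the image may neither end on a 128-pixel boundary nor run past the frame edge, so w is trimmed. A sketch with invented coordinates:

    #include <stdio.h>

    int main(void)
    {
        int x = 64, xorigin = 0, w = 64;
        int frame_end = 1280;   /* crtc->x + crtc_hdisplay, say */
        int cursor_end = x - xorigin + w;

        if (cursor_end >= frame_end) {
            w -= cursor_end - frame_end;  /* clip to the frame... */
            if (!(frame_end & 0x7f))      /* ...and dodge the boundary */
                w--;
        } else if (!(cursor_end & 0x7f)) {
            w--;  /* ends exactly on a multiple of 128: shrink by one */
        }
        printf("adjusted width: %d\n", w);  /* 64+64 = 128, so w becomes 63 */
        return 0;
    }
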


Property changes on: trunk/sys/dev/drm2/radeon/radeon_cursor.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_device.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_device.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_device.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1570 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_device.c 289784 2015-10-23 07:25:14Z avg $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+static const char radeon_family_name[][16] = {
+	"R100",
+	"RV100",
+	"RS100",
+	"RV200",
+	"RS200",
+	"R200",
+	"RV250",
+	"RS300",
+	"RV280",
+	"R300",
+	"R350",
+	"RV350",
+	"RV380",
+	"R420",
+	"R423",
+	"RV410",
+	"RS400",
+	"RS480",
+	"RS600",
+	"RS690",
+	"RS740",
+	"RV515",
+	"R520",
+	"RV530",
+	"RV560",
+	"RV570",
+	"R580",
+	"R600",
+	"RV610",
+	"RV630",
+	"RV670",
+	"RV620",
+	"RV635",
+	"RS780",
+	"RS880",
+	"RV770",
+	"RV730",
+	"RV710",
+	"RV740",
+	"CEDAR",
+	"REDWOOD",
+	"JUNIPER",
+	"CYPRESS",
+	"HEMLOCK",
+	"PALM",
+	"SUMO",
+	"SUMO2",
+	"BARTS",
+	"TURKS",
+	"CAICOS",
+	"CAYMAN",
+	"ARUBA",
+	"TAHITI",
+	"PITCAIRN",
+	"VERDE",
+	"LAST",
+};
+
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
+ */
+void radeon_surface_init(struct radeon_device *rdev)
+{
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R600) {
+		int i;
+
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
+		}
+		/* enable surfaces */
+		WREG32(RADEON_SURFACE_CNTL, 0);
+	}
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
+void radeon_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R300) {
+		rdev->scratch.num_reg = 5;
+	} else {
+		rdev->scratch.num_reg = 7;
+	}
+	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.free[i]) {
+			rdev->scratch.free[i] = false;
+			*reg = rdev->scratch.reg[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.reg[i] == reg) {
+			rdev->scratch.free[i] = true;
+			return;
+		}
+	}
+}
+
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics).  Used for suspend.
+ */
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->wb.wb_obj) {
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return;
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+	}
+	rdev->wb.enabled = false;
+}
+
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+	radeon_wb_disable(rdev);
+	if (rdev->wb.wb_obj) {
+		radeon_bo_unref(&rdev->wb.wb_obj);
+		rdev->wb.wb = NULL;
+		rdev->wb.wb_obj = NULL;
+	}
+}
+
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes Writeback and allocates the Writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or an -error on failure.
+ */
+int radeon_wb_init(struct radeon_device *rdev)
+{
+	int r;
+	void *wb_ptr; /* FreeBSD: to please GCC 4.2. */
+
+	if (rdev->wb.wb_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+	}
+	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+	if (unlikely(r != 0)) {
+		radeon_wb_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+			  &rdev->wb.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+		radeon_wb_fini(rdev);
+		return r;
+	}
+	wb_ptr = &rdev->wb.wb;
+	r = radeon_bo_kmap(rdev->wb.wb_obj, wb_ptr);
+	radeon_bo_unreserve(rdev->wb.wb_obj);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+		radeon_wb_fini(rdev);
+		return r;
+	}
+
+	/* clear wb memory */
+	memset(*(void **)wb_ptr, 0, RADEON_GPU_PAGE_SIZE);
+	/* disable event_write fences */
+	rdev->wb.use_event = false;
+	/* disabled via module param */
+	if (radeon_no_wb == 1) {
+		rdev->wb.enabled = false;
+	} else {
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* often unreliable on AGP */
+			rdev->wb.enabled = false;
+		} else if (rdev->family < CHIP_R300) {
+			/* often unreliable on pre-r300 */
+			rdev->wb.enabled = false;
+		} else {
+			rdev->wb.enabled = true;
+			/* event_write fences are only available on r600+ */
+			if (rdev->family >= CHIP_R600) {
+				rdev->wb.use_event = true;
+			}
+		}
+	}
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
+		rdev->wb.enabled = true;
+		rdev->wb.use_event = true;
+	}
+
+	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+	return 0;
+}
+
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as parameter (which is so far either the PCI aperture address or,
+ * for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and if the AGP aperture doesn't allow us to have
+ * room for all the VRAM then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ *
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size as on some boards we need to program the mc to
+ * cover the whole aperture even if VRAM size is smaller than aperture size
+ * (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting vram it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
+ * of Ubuntu ones).
+ *
+ * Note: IGP TOM addr should be the same as the aperture addr, we don't
+ * explicitly check for that though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
+	mc->vram_start = base;
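+	/* if VRAM will not fit below the 4GB MC address boundary, fall back to the aperture size */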
+	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
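+	/* on AGP, the VRAM range must not overlap the previously placed GTT range */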
+	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
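+	/* honor the user-requested vram limit (radeon_vram_limit), if set */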
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
+	dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n",
+			(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
+			(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place the GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size, align the new size on a power of 2.
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_af, size_bf;
+
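+	/* size_af/size_bf: aligned space available after the VRAM end / before the VRAM start */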
+	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
+		}
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+	} else {
+		if (mc->gtt_size > size_af) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n",
+			(uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end);
+}
+
+/*
+ * GPU helper functions.
+ */
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
+bool radeon_card_posted(struct radeon_device *rdev)
+{
+	uint32_t reg;
+
+#ifdef FREEBSD_WIP
+	if (efi_enabled(EFI_BOOT) &&
+	    rdev->dev->pci_subvendor == PCI_VENDOR_ID_APPLE)
+		return false;
+#endif /* FREEBSD_WIP */
+
+	/* first check CRTCs */
+	if (ASIC_IS_DCE41(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+		      RREG32(AVIVO_D2CRTC_CONTROL);
+		if (reg & AVIVO_CRTC_EN) {
+			return true;
+		}
+	} else {
+		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+		      RREG32(RADEON_CRTC2_GEN_CNTL);
+		if (reg & RADEON_CRTC_EN) {
+			return true;
+		}
+	}
+
+	/* then check MEM_SIZE, in case the crtcs are off */
+	if (rdev->family >= CHIP_R600)
+		reg = RREG32(R600_CONFIG_MEMSIZE);
+	else
+		reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+	if (reg)
+		return true;
+
+	return false;
+}
+
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * The params are used to calculate display watermarks (all asics).
+ */
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+	fixed20_12 a;
+	u32 sclk = rdev->pm.current_sclk;
+	u32 mclk = rdev->pm.current_mclk;
+
+	/* sclk/mclk in MHz */
+	a.full = dfixed_const(100);
+	rdev->pm.sclk.full = dfixed_const(sclk);
+	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+	rdev->pm.mclk.full = dfixed_const(mclk);
+	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		a.full = dfixed_const(16);
+		/* core_bandwidth = sclk(MHz) * 16 */
+		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+	}
+}
+
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for GART entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_dummy_page_init(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.dmah)
+		return 0;
+	rdev->dummy_page.dmah = drm_pci_alloc(rdev->ddev,
+	    PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR_32BIT);
+	if (rdev->dummy_page.dmah == NULL)
+		return -ENOMEM;
+	rdev->dummy_page.addr = rdev->dummy_page.dmah->busaddr;
+	return 0;
+}
+
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+void radeon_dummy_page_fini(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.dmah == NULL)
+		return;
+	drm_pci_free(rdev->ddev, rdev->dummy_page.dmah);
+	rdev->dummy_page.dmah = NULL;
+	rdev->dummy_page.addr = 0;
+}
+
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios.  The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->pll_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->pll_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->mc_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->mc_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32(reg*4, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32(reg*4);
+	return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32_IO(reg*4, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32_IO(reg*4);
+	return r;
+}
+
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+	struct card_info *atom_card_info =
+	    malloc(sizeof(struct card_info),
+		DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!atom_card_info)
+		return -ENOMEM;
+
+	rdev->mode_info.atom_card_info = atom_card_info;
+	atom_card_info->dev = rdev->ddev;
+	atom_card_info->reg_read = cail_reg_read;
+	atom_card_info->reg_write = cail_reg_write;
+	/* needed for iio ops */
+	if (rdev->rio_mem) {
+		atom_card_info->ioreg_read = cail_ioreg_read;
+		atom_card_info->ioreg_write = cail_ioreg_write;
+	} else {
+		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+		atom_card_info->ioreg_read = cail_reg_read;
+		atom_card_info->ioreg_write = cail_reg_write;
+	}
+	atom_card_info->mc_read = cail_mc_read;
+	atom_card_info->mc_write = cail_mc_write;
+	atom_card_info->pll_read = cail_pll_read;
+	atom_card_info->pll_write = cail_pll_write;
+
+	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+	sx_init(&rdev->mode_info.atom_context->mutex,
+	    "drm__radeon_device__mode_info__atom_context__mutex");
+	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
+	return 0;
+}
+
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+	if (rdev->mode_info.atom_context) {
+		free(rdev->mode_info.atom_context->scratch, DRM_MEM_DRIVER);
+		atom_destroy(rdev->mode_info.atom_context);
+	}
+	free(rdev->mode_info.atom_card_info, DRM_MEM_DRIVER);
+}
+
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser.  See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
+int radeon_combios_init(struct radeon_device *rdev)
+{
+	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+	return 0;
+}
+
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+#ifdef FREEBSD_WIP
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+static unsigned int radeon_vga_set_decode(void *cookie, bool state)
+{
+	struct radeon_device *rdev = cookie;
+	radeon_vga_set_state(rdev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+#endif /* FREEBSD_WIP */
+
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
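+	/* classic power-of-two test; 0 also passes, which is fine since
+	 * a vram limit of 0 means "no limit" */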
+	return (arg & (arg - 1)) == 0;
+}
+
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+static void radeon_check_arguments(struct radeon_device *rdev)
+{
+	/* vramlimit must be a power of two */
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
+		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+				radeon_vram_limit);
+		radeon_vram_limit = 0;
+	}
+
+	/* gtt size must be a power of two and greater than or equal to 32M */
+	if (radeon_gart_size < 32) {
+		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
+		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+	}
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
+	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
+	switch (radeon_agpmode) {
+	case -1:
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+		radeon_agpmode = 0;
+		break;
+	}
+}
+
+/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if a longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+#ifdef FREEBSD_WIP
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+	/* 6600m in a macbook pro */
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+	    pdev->subsystem_device == 0x00e2) {
+		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+		return true;
+	}
+
+	return false;
+}
+#endif /* FREEBSD_WIP */
+
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver.  Suspends or resumes the
+ * asic before or after it is powered up using ACPI methods.
+ */
+#ifdef FREEBSD_WIP
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		unsigned d3_delay = dev->pdev->d3_delay;
+
+		printk(KERN_INFO "radeon: switched on\n");
+		/* don't suspend or resume card normally */
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+			dev->pdev->d3_delay = 20;
+
+		radeon_resume_kms(dev);
+
+		dev->pdev->d3_delay = d3_delay;
+
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+		drm_kms_helper_poll_enable(dev);
+	} else {
+		printk(KERN_INFO "radeon: switched off\n");
+		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		radeon_suspend_kms(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+#endif /* FREEBSD_WIP */
+
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver.  Checks if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
+#ifdef FREEBSD_WIP
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+	.set_gpu_state = radeon_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = radeon_switcheroo_can_switch,
+};
+#endif /* FREEBSD_WIP */
+
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       uint32_t flags)
+{
+	int r, i;
+	int dma_bits;
+
+	rdev->shutdown = false;
+	rdev->dev = ddev->dev;
+	rdev->ddev = ddev;
+	rdev->flags = flags;
+	rdev->family = flags & RADEON_FAMILY_MASK;
+	rdev->is_atom_bios = false;
+	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	rdev->accel_working = false;
+	rdev->fictitious_range_registered = false;
+	rdev->fictitious_agp_range_registered = false;
+	/* set up ring ids */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		rdev->ring[i].idx = i;
+	}
+
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+		radeon_family_name[rdev->family], ddev->pci_vendor, ddev->pci_device,
+		ddev->pci_subvendor, ddev->pci_subdevice);
+
+	/* mutex initialization is all done here so we
+	 * can recall functions without having locking issues */
+	sx_init(&rdev->ring_lock, "drm__radeon_device__ring_lock");
+	sx_init(&rdev->dc_hw_i2c_mutex, "drm__radeon_device__dc_hw_i2c_mutex");
+	atomic_set(&rdev->ih.lock, 0);
+	sx_init(&rdev->gem.mutex, "drm__radeon_device__gem__mutex");
+	sx_init(&rdev->pm.mutex, "drm__radeon_device__pm__mutex");
+	sx_init(&rdev->gpu_clock_mutex, "drm__radeon_device__gpu_clock_mutex");
+	sx_init(&rdev->pm.mclk_lock, "drm__radeon_device__pm__mclk_lock");
+	sx_init(&rdev->exclusive_lock, "drm__radeon_device__exclusive_lock");
+	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue);
+	r = radeon_gem_init(rdev);
+	if (r)
+		return r;
+	/* initialize vm here */
+	sx_init(&rdev->vm_manager.lock, "drm__radeon_device__vm_manager__lock");
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for cayman and SI is 40 bits.
+	 */
+	rdev->vm_manager.max_pfn = 1 << 20;
+	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
+
+	/* Set asic functions */
+	r = radeon_asic_init(rdev);
+	if (r)
+		return r;
+	radeon_check_arguments(rdev);
+
+	/* all of the newer IGP chips have an internal gart.
+	 * However, some rs4xx report as AGP, so remove that flag here.
+	 */
+	if ((rdev->family >= CHIP_RS400) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		rdev->flags &= ~RADEON_IS_AGP;
+	}
+
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+		radeon_agp_disable(rdev);
+	}
+
+	/* set DMA mask + need_dma32 flags.
+	 * PCIE - can handle 40-bits.
+	 * IGP - can handle 40-bits
+	 * AGP - generally dma32 is safest
+	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+	 */
+	rdev->need_dma32 = false;
+	if (rdev->flags & RADEON_IS_AGP)
+		rdev->need_dma32 = true;
+	if ((rdev->flags & RADEON_IS_PCI) &&
+	    (rdev->family <= CHIP_RS740))
+		rdev->need_dma32 = true;
+
+	dma_bits = rdev->need_dma32 ? 32 : 40;
+#ifdef FREEBSD_WIP
+	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		rdev->need_dma32 = true;
+		dma_bits = 32;
+		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+	}
+	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+	}
+#endif /* FREEBSD_WIP */
+
+	/* Registers mapping */
+	/* TODO: block userspace mapping of io register */
+	DRM_SPININIT(&rdev->mmio_idx_lock, "drm__radeon_device__mmio_idx_lock");
+	rdev->rmmio_rid = PCIR_BAR(2);
+	rdev->rmmio = bus_alloc_resource_any(rdev->dev, SYS_RES_MEMORY,
+	    &rdev->rmmio_rid, RF_ACTIVE | RF_SHAREABLE);
+	if (rdev->rmmio == NULL) {
+		return -ENOMEM;
+	}
+	rdev->rmmio_base = rman_get_start(rdev->rmmio);
+	rdev->rmmio_size = rman_get_size(rdev->rmmio);
+	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
+	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+
+	/* io port mapping */
+	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
+		uint32_t data;
+
+		data = pci_read_config(rdev->dev, PCIR_BAR(i), 4);
+		if (PCI_BAR_IO(data)) {
+			rdev->rio_rid = PCIR_BAR(i);
+			rdev->rio_mem = bus_alloc_resource_any(rdev->dev,
+			    SYS_RES_IOPORT, &rdev->rio_rid,
+			    RF_ACTIVE | RF_SHAREABLE);
+			break;
+		}
+	}
+	if (rdev->rio_mem == NULL)
+		DRM_ERROR("Unable to find PCI I/O BAR\n");
+
+	rdev->tq = taskqueue_create("radeonkms", M_WAITOK,
+	    taskqueue_thread_enqueue, &rdev->tq);
+	taskqueue_start_threads(&rdev->tq, 1, PWAIT, "radeon taskq");
+
+#ifdef FREEBSD_WIP
+	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
+#endif /* FREEBSD_WIP */
+
+	r = radeon_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+		/* Acceleration not working on AGP card, try again
+		 * with fallback to PCI or PCIe GART
+		 */
+		radeon_asic_reset(rdev);
+		radeon_fini(rdev);
+		radeon_agp_disable(rdev);
+		r = radeon_init(rdev);
+		if (r)
+			return r;
+	}
+
+	DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n",
+	    __func__, (uintmax_t)rdev->mc.aper_base,
+	    (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size);
+	r = vm_phys_fictitious_reg_range(
+	    rdev->mc.aper_base,
+	    rdev->mc.aper_base + rdev->mc.visible_vram_size,
+	    VM_MEMATTR_WRITE_COMBINING);
+	if (r != 0) {
+		DRM_ERROR("Failed to register fictitious range "
+		    "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.aper_base,
+		    (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size, r);
+		return (-r);
+	}
+	rdev->fictitious_range_registered = true;
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n",
+		    __func__, (uintmax_t)rdev->mc.agp_base,
+		    (uintmax_t)rdev->mc.agp_base + rdev->mc.gtt_size);
+		r = vm_phys_fictitious_reg_range(
+		    rdev->mc.agp_base,
+		    rdev->mc.agp_base + rdev->mc.gtt_size,
+		    VM_MEMATTR_WRITE_COMBINING);
+		if (r != 0) {
+			DRM_ERROR("Failed to register fictitious range "
+			    "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.agp_base,
+			    (uintmax_t)rdev->mc.agp_base + rdev->mc.gtt_size, r);
+			return (-r);
+		}
+		rdev->fictitious_agp_range_registered = true;
+	}
+#endif
+
+	if ((radeon_testing & 1)) {
+		radeon_test_moves(rdev);
+	}
+	if ((radeon_testing & 2)) {
+		radeon_test_syncing(rdev);
+	}
+	if (radeon_benchmarking) {
+		radeon_benchmark(rdev, radeon_benchmarking);
+	}
+	return 0;
+}
+
+#ifdef FREEBSD_WIP
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+#endif /* FREEBSD_WIP */
+
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
+void radeon_device_fini(struct radeon_device *rdev)
+{
+	DRM_INFO("radeon: finishing device.\n");
+	rdev->shutdown = true;
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	if (rdev->fictitious_range_registered) {
+		vm_phys_fictitious_unreg_range(
+		    rdev->mc.aper_base,
+		    rdev->mc.aper_base + rdev->mc.visible_vram_size);
+	}
+#if __OS_HAS_AGP
+	if (rdev->fictitious_agp_range_registered) {
+		vm_phys_fictitious_unreg_range(
+		    rdev->mc.agp_base,
+		    rdev->mc.agp_base + rdev->mc.gtt_size);
+	}
+#endif
+
+	radeon_fini(rdev);
+#ifdef FREEBSD_WIP
+	vga_switcheroo_unregister_client(rdev->pdev);
+	vga_client_register(rdev->pdev, NULL, NULL, NULL);
+#endif /* FREEBSD_WIP */
+
+	if (rdev->tq != NULL) {
+		taskqueue_free(rdev->tq);
+		rdev->tq = NULL;
+	}
+
+	if (rdev->rio_mem)
+		bus_release_resource(rdev->dev, SYS_RES_IOPORT, rdev->rio_rid,
+		    rdev->rio_mem);
+	rdev->rio_mem = NULL;
+	bus_release_resource(rdev->dev, SYS_RES_MEMORY, rdev->rmmio_rid,
+	    rdev->rmmio);
+	rdev->rmmio = NULL;
+#ifdef FREEBSD_WIP
+	radeon_debugfs_remove_files(rdev);
+#endif /* FREEBSD_WIP */
+}
+
+
+/*
+ * Suspend & resume.
+ */
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int radeon_suspend_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	int i, r;
+	bool force_completion = false;
+
+	if (dev == NULL || dev->dev_private == NULL) {
+		return -ENODEV;
+	}
+#ifdef FREEBSD_WIP
+	if (state.event == PM_EVENT_PRETHAW) {
+		return 0;
+	}
+#endif /* FREEBSD_WIP */
+	rdev = dev->dev_private;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	drm_kms_helper_poll_disable(dev);
+
+	/* turn off display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
+
+	/* unpin the front buffers */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+		struct radeon_bo *robj;
+
+		if (rfb == NULL || rfb->obj == NULL) {
+			continue;
+		}
+		robj = gem_to_radeon_bo(rfb->obj);
+		/* don't unpin kernel fb objects */
+		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	sx_xlock(&rdev->ring_lock);
+	/* wait for gpu to finish processing current batch */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		r = radeon_fence_wait_empty_locked(rdev, i);
+		if (r) {
+			/* delay GPU reset to resume */
+			force_completion = true;
+		}
+	}
+	if (force_completion) {
+		radeon_fence_driver_force_completion(rdev);
+	}
+	sx_xunlock(&rdev->ring_lock);
+
+	radeon_save_bios_scratch_regs(rdev);
+
+	radeon_pm_suspend(rdev);
+	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
+	/* evict remaining vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	radeon_agp_suspend(rdev);
+
+#ifdef FREEBSD_WIP
+	if (state.event == PM_EVENT_SUSPEND) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+	}
+	console_lock();
+#endif /* FREEBSD_WIP */
+	radeon_fbdev_set_suspend(rdev, 1);
+#ifdef FREEBSD_WIP
+	console_unlock();
+#endif /* FREEBSD_WIP */
+	return 0;
+}
+
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int radeon_resume_kms(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_device *rdev = dev->dev_private;
+	int r;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+#ifdef FREEBSD_WIP
+	console_lock();
+	if (pci_enable_device(dev->pdev)) {
+		console_unlock();
+		return -1;
+	}
+#endif /* FREEBSD_WIP */
+	/* resume AGP if in use */
+	radeon_agp_resume(rdev);
+	radeon_resume(rdev);
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	radeon_pm_resume(rdev);
+	radeon_restore_bios_scratch_regs(rdev);
+
+	radeon_fbdev_set_suspend(rdev, 0);
+#ifdef FREEBSD_WIP
+	console_unlock();
+#endif /* FREEBSD_WIP */
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
+	}
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
+	/* blat the mode back in */
+	drm_helper_resume_force_mode(dev);
+	/* turn on display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	}
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempts to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+	unsigned ring_sizes[RADEON_NUM_RINGS];
+	uint32_t *ring_data[RADEON_NUM_RINGS];
+	bool saved = false;
+	int i, r;
+	int resched;
+
+	sx_xlock(&rdev->exclusive_lock);
+	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	radeon_suspend(rdev);
+
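+	/* back up any unprocessed commands from each ring so they can be replayed after the reset */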
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+						   &ring_data[i]);
+		if (ring_sizes[i]) {
+			saved = true;
+			dev_info(rdev->dev, "Saved %d dwords of commands "
+				 "on ring %d.\n", ring_sizes[i], i);
+		}
+	}
+
+retry:
+	r = radeon_asic_reset(rdev);
+	if (!r) {
+		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
+		radeon_resume(rdev);
+	}
+
+	radeon_restore_bios_scratch_regs(rdev);
+
+	if (!r) {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			radeon_ring_restore(rdev, &rdev->ring[i],
+					    ring_sizes[i], ring_data[i]);
+			ring_sizes[i] = 0;
+			ring_data[i] = NULL;
+		}
+
+		r = radeon_ib_ring_tests(rdev);
+		if (r) {
+			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+			if (saved) {
+				saved = false;
+				radeon_suspend(rdev);
+				goto retry;
+			}
+		}
+	} else {
+		radeon_fence_driver_force_completion(rdev);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			free(ring_data[i], DRM_MEM_DRIVER);
+		}
+	}
+
+	drm_helper_resume_force_mode(rdev->ddev);
+
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+	if (r) {
+		/* bad news, how do we tell userspace? */
+		dev_info(rdev->dev, "GPU reset failed\n");
+	}
+
+	sx_xunlock(&rdev->exclusive_lock);
+	return r;
+}
+
+
+/*
+ * Debugfs
+ */
+#ifdef FREEBSD_WIP
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->debugfs_count; i++) {
+		if (rdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = rdev->debugfs_count + 1;
+	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we increase "
+		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	rdev->debugfs[rdev->debugfs_count].files = files;
+	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+	rdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 rdev->ddev->control->debugfs_root,
+				 rdev->ddev->control);
+	drm_debugfs_create_files(files, nfiles,
+				 rdev->ddev->primary->debugfs_root,
+				 rdev->ddev->primary);
+#endif
+	return 0;
+}
+
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+
+	for (i = 0; i < rdev->debugfs_count; i++) {
+		drm_debugfs_remove_files(rdev->debugfs[i].files,
+					 rdev->debugfs[i].num_files,
+					 rdev->ddev->control);
+		drm_debugfs_remove_files(rdev->debugfs[i].files,
+					 rdev->debugfs[i].num_files,
+					 rdev->ddev->primary);
+	}
+#endif
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor)
+{
+	return 0;
+}
+
+void radeon_debugfs_cleanup(struct drm_minor *minor)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* FREEBSD_WIP */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_device.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_display.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_display.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_display.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1688 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_display.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_edid.h>
+
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
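+	/* each LUT entry packs the 10-bit R/G/B components at bit offsets 20/10/0 */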
+	for (i = 0; i < 256; i++) {
+		WREG32(AVIVO_DC_LUT_30_COLOR,
+			     (radeon_crtc->lut_r[i] << 20) |
+			     (radeon_crtc->lut_g[i] << 10) |
+			     (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+}
+
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+}
+
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
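+	/* bypass the input CSC and prescalers and take input gamma from the LUT */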
+	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+}
+
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+	uint32_t dac2_cntl;
+
+	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+	if (radeon_crtc->crtc_id == 0)
+		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+	else
+		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+	WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+	WREG8(RADEON_PALETTE_INDEX, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_PALETTE_30_DATA,
+			     (radeon_crtc->lut_r[i] << 20) |
+			     (radeon_crtc->lut_g[i] << 10) |
+			     (radeon_crtc->lut_b[i] << 0));
+	}
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!crtc->enabled)
+		return;
+
+	if (ASIC_IS_DCE5(rdev))
+		dce5_crtc_load_lut(crtc);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_crtc_load_lut(crtc);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_crtc_load_lut(crtc);
+	else
+		legacy_crtc_load_lut(crtc);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
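+	/* fbcon hands us 16-bit components; the hardware LUT stores 10 bits */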
+	radeon_crtc->lut_r[regno] = red >> 6;
+	radeon_crtc->lut_g[regno] = green >> 6;
+	radeon_crtc->lut_b[regno] = blue >> 6;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	*red = radeon_crtc->lut_r[regno] << 6;
+	*green = radeon_crtc->lut_g[regno] << 6;
+	*blue = radeon_crtc->lut_b[regno] << 6;
+}
+
+static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int end = (start + size > 256) ? 256 : start + size, i;
+
+	/* userspace palettes are always correct as is */
+	for (i = start; i < end; i++) {
+		radeon_crtc->lut_r[i] = red[i] >> 6;
+		radeon_crtc->lut_g[i] = green[i] >> 6;
+		radeon_crtc->lut_b[i] = blue[i] >> 6;
+	}
+	radeon_crtc_load_lut(crtc);
+}
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	free(radeon_crtc, DRM_MEM_DRIVER);
+}
+
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(void *arg, int pending)
+{
+	struct radeon_unpin_work *work = arg;
+	int r;
+
+	/* unpin of the old buffer */
+	r = radeon_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = radeon_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		radeon_bo_unreserve(work->old_rbo);
+	} else
+		DRM_ERROR("failed to reserve buffer after flip\n");
+
+	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	free(work, DRM_MEM_DRIVER);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	struct radeon_unpin_work *work;
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	u32 update_pending;
+	int vpos, hpos;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->ddev->event_lock, flags);
+	work = radeon_crtc->unpin_work;
+	if (work == NULL ||
+	    (work->fence && !radeon_fence_signaled(work->fence))) {
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->ddev->event_lock, flags);
+		return;
+	}
+	/* New pageflip, or just completion of a previous one? */
+	if (!radeon_crtc->deferred_flip_completion) {
+		/* do the flip (mmio) */
+		update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+	} else {
+		/* This is just a completion of a flip queued in crtc
+		 * at last invocation. Make sure we go directly to
+		 * completion routine.
+		 */
+		update_pending = 0;
+		radeon_crtc->deferred_flip_completion = 0;
+	}
+
+	/* Has the pageflip already completed in crtc, or is it certain
+	 * to complete in this vblank?
+	 */
+	if (update_pending &&
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+							       &vpos, &hpos)) &&
+	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
+	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. Based on the current
+		 * scanout position we know that the current frame is
+		 * (nearly) complete and the flip will (likely)
+		 * complete before the start of the next frame.
+		 */
+		update_pending = 0;
+	}
+	if (update_pending) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. It will complete it
+		 * in next vblank interval, so complete the flip at
+		 * next vblank irq.
+		 */
+		radeon_crtc->deferred_flip_completion = 1;
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* Pageflip (will be) certainly completed in this vblank. Clean up. */
+	radeon_crtc->unpin_work = NULL;
+
+	/* wakeup userspace */
+	if (work->event) {
+		e = work->event;
+		e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		drm_event_wakeup(&e->base);
+	}
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->ddev->event_lock, flags);
+
+	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+	radeon_fence_unref(&work->fence);
+	radeon_post_page_flip(work->rdev, work->crtc_id);
+	taskqueue_enqueue(rdev->tq, &work->work);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct radeon_unpin_work *work;
+	unsigned long flags;
+	u32 tiling_flags, pitch_pixels;
+	u64 base;
+	int r;
+
+	work = malloc(sizeof *work, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (work == NULL)
+		return -ENOMEM;
+
+	work->event = event;
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	/* schedule unpin of the old buffer */
+	obj = old_radeon_fb->obj;
+	/* take a reference to the old object */
+	drm_gem_object_reference(obj);
+	rbo = gem_to_radeon_bo(obj);
+	work->old_rbo = rbo;
+	obj = new_radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+
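+	/* if the new buffer still has a fence pending, hold a reference to
+	 * it; the flip is only executed once the fence has signaled */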
+	mtx_lock(&rbo->tbo.bdev->fence_lock);
+	if (rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+	mtx_unlock(&rbo->tbo.bdev->fence_lock);
+
+	TASK_INIT(&work->work, 0, radeon_unpin_work_func, work);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags);
+	if (radeon_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		r = -EBUSY;
+		goto unlock_free;
+	}
+	radeon_crtc->unpin_work = work;
+	radeon_crtc->deferred_flip_completion = 0;
+	DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags);
+
+	/* pin the new buffer */
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+			 work->old_rbo, rbo);
+
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		/* crtc offset is from display base addr not FB location */
+		base -= radeon_crtc->legacy_display_base_addr;
+		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+
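+		/* translate the (x, y) scanout origin into a byte offset within the surface, tiled or linear */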
+		if (tiling_flags & RADEON_TILING_MACRO) {
+			if (ASIC_IS_R300(rdev)) {
+				base &= ~0x7ff;
+			} else {
+				int byteshift = fb->bits_per_pixel >> 4;
+				int tile_addr = (((crtc->y >> 3) * pitch_pixels +  crtc->x) >> (8 - byteshift)) << 11;
+				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+			}
+		} else {
+			int offset = crtc->y * pitch_pixels + crtc->x;
+			switch (fb->bits_per_pixel) {
+			case 8:
+			default:
+				offset *= 1;
+				break;
+			case 15:
+			case 16:
+				offset *= 2;
+				break;
+			case 24:
+				offset *= 3;
+				break;
+			case 32:
+				offset *= 4;
+				break;
+			}
+			base += offset;
+		}
+		base &= ~7;
+	}
+
+	DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags);
+	work->new_crtc_base = base;
+	DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags);
+
+	/* update crtc fb */
+	crtc->fb = fb;
+
+	r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+	if (r) {
+		DRM_ERROR("failed to get vblank before flip\n");
+		goto pflip_cleanup1;
+	}
+
+	/* set the proper interrupt */
+	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+
+	return 0;
+
+pflip_cleanup1:
+	if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	if (unlikely(radeon_bo_unpin(rbo) != 0)) {
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+	}
+	radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+	DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags);
+	radeon_crtc->unpin_work = NULL;
+unlock_free:
+	DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags);
+	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+	radeon_fence_unref(&work->fence);
+	free(work, DRM_MEM_DRIVER);
+
+	return r;
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+	.cursor_set = radeon_crtc_cursor_set,
+	.cursor_move = radeon_crtc_cursor_move,
+	.gamma_set = radeon_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = radeon_crtc_destroy,
+	.page_flip = radeon_crtc_page_flip,
+};
+
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+
+	radeon_crtc = malloc(sizeof(struct radeon_crtc) +
+	    (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (radeon_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+	radeon_crtc->crtc_id = index;
+	rdev->mode_info.crtcs[index] = radeon_crtc;
+
+#if 0
+	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+	radeon_crtc->mode_set.num_connectors = 0;
+#endif
+
+	for (i = 0; i < 256; i++) {
+		radeon_crtc->lut_r[i] = i << 2;
+		radeon_crtc->lut_g[i] = i << 2;
+		radeon_crtc->lut_b[i] = i << 2;
+	}
+
+	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+		radeon_atombios_init_crtc(dev, radeon_crtc);
+	else
+		radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[37] = {
+	"NONE",
+	"INTERNAL_LVDS",
+	"INTERNAL_TMDS1",
+	"INTERNAL_TMDS2",
+	"INTERNAL_DAC1",
+	"INTERNAL_DAC2",
+	"INTERNAL_SDVOA",
+	"INTERNAL_SDVOB",
+	"SI170B",
+	"CH7303",
+	"CH7301",
+	"INTERNAL_DVO1",
+	"EXTERNAL_SDVOA",
+	"EXTERNAL_SDVOB",
+	"TITFP513",
+	"INTERNAL_LVTM1",
+	"VT1623",
+	"HDMI_SI1930",
+	"HDMI_INTERNAL",
+	"INTERNAL_KLDSCP_TMDS1",
+	"INTERNAL_KLDSCP_DVO1",
+	"INTERNAL_KLDSCP_DAC1",
+	"INTERNAL_KLDSCP_DAC2",
+	"SI178",
+	"MVPU_FPGA",
+	"INTERNAL_DDI",
+	"VT1625",
+	"HDMI_SI1932",
+	"DP_AN9801",
+	"DP_DP501",
+	"INTERNAL_UNIPHY",
+	"INTERNAL_KLDSCP_LVTMA",
+	"INTERNAL_UNIPHY1",
+	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
+	"INTERNAL_VCE"
+};
+
+static const char *hpd_names[6] = {
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t devices;
+	int i = 0;
+
+	DRM_INFO("Radeon Display Connectors\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		DRM_INFO("Connector %d:\n", i);
+		DRM_INFO("  %s\n", drm_get_connector_name(connector));
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
+		if (radeon_connector->ddc_bus) {
+			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				 radeon_connector->ddc_bus->rec.mask_clk_reg,
+				 radeon_connector->ddc_bus->rec.mask_data_reg,
+				 radeon_connector->ddc_bus->rec.a_clk_reg,
+				 radeon_connector->ddc_bus->rec.a_data_reg,
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
+			if (radeon_connector->router.ddc_valid)
+				DRM_INFO("  DDC Router 0x%x/0x%x\n",
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati at lists.x.org\n");
+		}
+		DRM_INFO("  Encoders:\n");
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			devices = radeon_encoder->devices & radeon_connector->devices;
+			if (devices) {
+				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_TV1_SUPPORT)
+					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CV_SUPPORT)
+					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+			}
+		}
+		i++;
+	}
+}
+
+static bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	bool ret = false;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios) {
+			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+			if (ret == false)
+				ret = radeon_get_atom_connector_info_from_object_table(dev);
+		} else {
+			ret = radeon_get_legacy_connector_info_from_bios(dev);
+			if (ret == false)
+				ret = radeon_get_legacy_connector_info_from_table(dev);
+		}
+	} else {
+		if (!ASIC_IS_AVIVO(rdev))
+			ret = radeon_get_legacy_connector_info_from_table(dev);
+	}
+	if (ret) {
+		radeon_setup_encoder_clones(dev);
+		radeon_print_display_setup(dev);
+	}
+
+	return ret;
+}
+
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+{
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret = 0;
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+	    ENCODER_OBJECT_ID_NONE) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if (dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      dig->dp_i2c_bus->adapter);
+	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      dig->dp_i2c_bus->adapter);
+		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      radeon_connector->ddc_bus->adapter);
+	} else {
+		if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      radeon_connector->ddc_bus->adapter);
+	}
+
+	if (!radeon_connector->edid) {
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+	}
+	if (radeon_connector->edid) {
+		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+		return ret;
+	}
+	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+	return 0;
+}
+
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+			     u32 target_clock,
+			     u32 post_div,
+			     u32 ref_div,
+			     u32 *fb_div,
+			     u32 *frac_fb_div)
+{
+	u32 tmp = post_div * ref_div;
+
+	tmp *= target_clock;
+	*fb_div = tmp / pll->reference_freq;
+	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
+}
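+
+/* Worked example (illustrative numbers, not from the driver): with
+ * target_clock = 27000, post_div = 2, ref_div = 1 and reference_freq =
+ * 2700, the code above computes tmp = 2 * 1 * 27000 = 54000, giving
+ * *fb_div = 54000 / 2700 = 20 and *frac_fb_div = 0, before the min/max
+ * feedback-divider clamps are applied.
+ */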
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+			      u32 target_clock)
+{
+	u32 vco, post_div, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		return pll->post_div;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_min;
+		else
+			vco = pll->pll_out_min;
+	} else {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_max;
+		else
+			vco = pll->pll_out_max;
+	}
+
+	post_div = vco / target_clock;
+	tmp = vco % target_clock;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (tmp)
+			post_div++;
+	} else {
+		if (!tmp)
+			post_div--;
+	}
+
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
+	return post_div;
+}
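+
+/* Example (hypothetical limits): with vco = pll_out_max = 60000 and
+ * target_clock = 27000, and neither RADEON_PLL_USE_POST_DIV nor
+ * RADEON_PLL_PREFER_MINM_OVER_MAXP set, post_div = 60000 / 27000 = 2
+ * with a non-zero remainder, so it is not decremented and 2 is returned
+ * (subject to the min/max post-divider clamps).
+ */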
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      u32 freq,
+			      u32 *dot_clock_p,
+			      u32 *fb_div_p,
+			      u32 *frac_fb_div_p,
+			      u32 *ref_div_p,
+			      u32 *post_div_p)
+{
+	u32 target_clock = freq / 10;
+	u32 post_div = avivo_get_post_div(pll, target_clock);
+	u32 ref_div = pll->min_ref_div;
+	u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div = pll->reference_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+		frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+		if (frac_fb_div >= 5) {
+			frac_fb_div -= 5;
+			frac_fb_div = frac_fb_div / 10;
+			frac_fb_div++;
+		}
+		if (frac_fb_div >= 10) {
+			fb_div++;
+			frac_fb_div = 0;
+		}
+	} else {
+		while (ref_div <= pll->max_ref_div) {
+			avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+					 &fb_div, &frac_fb_div);
+			if (frac_fb_div >= (pll->reference_freq / 2))
+				fb_div++;
+			frac_fb_div = 0;
+			tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+			tmp = (tmp * 10000) / target_clock;
+
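+			/* step ref_div until the computed clock is within
+			 * MAX_TOLERANCE parts in 10000 (0.1%) of the target
+			 */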
+			if (tmp > (10000 + MAX_TOLERANCE))
+				ref_div++;
+			else if (tmp >= (10000 - MAX_TOLERANCE))
+				break;
+			else
+				ref_div++;
+		}
+	}
+
+	*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+		(ref_div * post_div * 10);
+	*fb_div_p = fb_div;
+	*frac_fb_div_p = frac_fb_div;
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+	DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
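+
+/* Sanity check of the recombination above (illustrative numbers): with
+ * reference_freq = 2700, fb_div = 20, frac_fb_div = 0, ref_div = 1 and
+ * post_div = 2, *dot_clock_p = (2700 * 20 * 10 + 0) / (1 * 2 * 10) =
+ * 27000, i.e. the dividers recombine as reference_freq * (fb_div +
+ * frac_fb_div / 10) / (ref_div * post_div).
+ */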
+
+/* pre-avivo */
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+	uint64_t mod;
+
+	n += d / 2;
+
+	mod = do_div(n, d);
+	return n;
+}
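+
+/* Note: radeon_div() rounds to nearest instead of truncating, e.g.
+ * radeon_div(10, 4) computes (10 + 2) / 4 = 3 where plain integer
+ * division would yield 2.
+ */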
+
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+			       uint64_t freq,
+			       uint32_t *dot_clock_p,
+			       uint32_t *fb_div_p,
+			       uint32_t *frac_fb_div_p,
+			       uint32_t *ref_div_p,
+			       uint32_t *post_div_p)
+{
+	uint32_t min_ref_div = pll->min_ref_div;
+	uint32_t max_ref_div = pll->max_ref_div;
+	uint32_t min_post_div = pll->min_post_div;
+	uint32_t max_post_div = pll->max_post_div;
+	uint32_t min_fractional_feed_div = 0;
+	uint32_t max_fractional_feed_div = 0;
+	uint32_t best_vco = pll->best_vco;
+	uint32_t best_post_div = 1;
+	uint32_t best_ref_div = 1;
+	uint32_t best_feedback_div = 1;
+	uint32_t best_frac_feedback_div = 0;
+	uint32_t best_freq = -1;
+	uint32_t best_error = 0xffffffff;
+	uint32_t best_vco_diff = 1;
+	uint32_t post_div;
+	u32 pll_out_min, pll_out_max;
+
+	DRM_DEBUG_KMS("PLL freq %ju %u %u\n", (uintmax_t)freq, pll->min_ref_div, pll->max_ref_div);
+	freq = freq * 1000;
+
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
+
+	if (pll_out_min > 64800)
+		pll_out_min = 64800;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		min_ref_div = max_ref_div = pll->reference_div;
+	else {
+		while (min_ref_div < max_ref_div-1) {
+			uint32_t mid = (min_ref_div + max_ref_div) / 2;
+			uint32_t pll_in = pll->reference_freq / mid;
+			if (pll_in < pll->pll_in_min)
+				max_ref_div = mid;
+			else if (pll_in > pll->pll_in_max)
+				min_ref_div = mid;
+			else
+				break;
+		}
+	}
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		min_post_div = max_post_div = pll->post_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		min_fractional_feed_div = pll->min_frac_feedback_div;
+		max_fractional_feed_div = pll->max_frac_feedback_div;
+	}
+
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+		uint32_t ref_div;
+
+		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+			continue;
+
+		/* legacy radeons only have a few post_divs */
+		if (pll->flags & RADEON_PLL_LEGACY) {
+			if ((post_div == 5) ||
+			    (post_div == 7) ||
+			    (post_div == 9) ||
+			    (post_div == 10) ||
+			    (post_div == 11) ||
+			    (post_div == 13) ||
+			    (post_div == 14) ||
+			    (post_div == 15))
+				continue;
+		}
+
+		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+			uint32_t feedback_div, current_freq = 0, error, vco_diff;
+			uint32_t pll_in = pll->reference_freq / ref_div;
+			uint32_t min_feed_div = pll->min_feedback_div;
+			uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+				continue;
+
+			while (min_feed_div < max_feed_div) {
+				uint32_t vco;
+				uint32_t min_frac_feed_div = min_fractional_feed_div;
+				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+				uint32_t frac_feedback_div;
+				uint64_t tmp;
+
+				feedback_div = (min_feed_div + max_feed_div) / 2;
+
+				tmp = (uint64_t)pll->reference_freq * feedback_div;
+				vco = radeon_div(tmp, ref_div);
+
+				if (vco < pll_out_min) {
+					min_feed_div = feedback_div + 1;
+					continue;
+				} else if (vco > pll_out_max) {
+					max_feed_div = feedback_div;
+					continue;
+				}
+
+				while (min_frac_feed_div < max_frac_feed_div) {
+					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+					current_freq = radeon_div(tmp, ref_div * post_div);
+
+					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+						if (freq < current_freq)
+							error = 0xffffffff;
+						else
+							error = freq - current_freq;
+					} else
+						error = abs(current_freq - freq);
+					vco_diff = abs(vco - best_vco);
+
+					if ((best_vco == 0 && error < best_error) ||
+					    (best_vco != 0 &&
+					     ((best_error > 100 && error < best_error - 100) ||
+					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+						best_post_div = post_div;
+						best_ref_div = ref_div;
+						best_feedback_div = feedback_div;
+						best_frac_feedback_div = frac_feedback_div;
+						best_freq = current_freq;
+						best_error = error;
+						best_vco_diff = vco_diff;
+					} else if (current_freq == freq) {
+						if (best_freq == -1) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						}
+					}
+					if (current_freq < freq)
+						min_frac_feed_div = frac_feedback_div + 1;
+					else
+						max_frac_feed_div = frac_feedback_div;
+				}
+				if (current_freq < freq)
+					min_feed_div = feedback_div + 1;
+				else
+					max_feed_div = feedback_div;
+			}
+		}
+	}
+
+	*dot_clock_p = best_freq / 10000;
+	*fb_div_p = best_feedback_div;
+	*frac_fb_div_p = best_frac_feedback_div;
+	*ref_div_p = best_ref_div;
+	*post_div_p = best_post_div;
+	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      (long long)freq,
+		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+		      best_ref_div, best_post_div);
+}
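+
+/* The nested loops above amount to a binary search: for each candidate
+ * (post_div, ref_div) pair, feedback_div is bisected against the VCO
+ * limits and the target frequency, and the fractional feedback divider is
+ * bisected against the target as well, keeping the candidate with the
+ * smallest error (using vco_diff as a tie-breaker near best_vco).
+ */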
+
+static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+	if (radeon_fb->obj) {
+		drm_gem_object_unreference_unlocked(radeon_fb->obj);
+	}
+	drm_framebuffer_cleanup(fb);
+	free(radeon_fb, DRM_MEM_DRIVER);
+}
+
+static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						  struct drm_file *file_priv,
+						  unsigned int *handle)
+{
+	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+	.destroy = radeon_user_framebuffer_destroy,
+	.create_handle = radeon_user_framebuffer_create_handle,
+};
+
+int
+radeon_framebuffer_init(struct drm_device *dev,
+			struct radeon_framebuffer *rfb,
+			struct drm_mode_fb_cmd2 *mode_cmd,
+			struct drm_gem_object *obj)
+{
+	int ret;
+
+	rfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+	if (ret) {
+		rfb->obj = NULL;
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+	return 0;
+}
+
+static int
+radeon_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *file_priv,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_framebuffer **res)
+{
+	struct drm_gem_object *obj;
+	struct radeon_framebuffer *radeon_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (obj == NULL) {
+		dev_err(dev->dev, "No GEM object associated to handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handles[0]);
+		return -ENOENT;
+	}
+
+	radeon_fb = malloc(sizeof(*radeon_fb), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (radeon_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return (-ENOMEM);
+	}
+
+	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+	if (ret) {
+		free(radeon_fb, DRM_MEM_DRIVER);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	*res = &radeon_fb->base;
+	return 0;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	radeon_fb_output_poll_changed(rdev);
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+	.fb_create = radeon_user_framebuffer_create,
+	.output_poll_changed = radeon_output_poll_changed
+};
+
+static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+{	{ 0, "driver" },
+	{ 1, "bios" },
+};
+
+static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+{	{ TV_STD_NTSC, "ntsc" },
+	{ TV_STD_PAL, "pal" },
+	{ TV_STD_PAL_M, "pal-m" },
+	{ TV_STD_PAL_60, "pal-60" },
+	{ TV_STD_NTSC_J, "ntsc-j" },
+	{ TV_STD_SCART_PAL, "scart-pal" },
+	{ TV_STD_PAL_CN, "pal-cn" },
+	{ TV_STD_SECAM, "secam" },
+};
+
+static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{	{ UNDERSCAN_OFF, "off" },
+	{ UNDERSCAN_ON, "on" },
+	{ UNDERSCAN_AUTO, "auto" },
+};
+
+static int radeon_modeset_create_props(struct radeon_device *rdev)
+{
+	int sz;
+
+	if (rdev->is_atom_bios) {
+		rdev->mode_info.coherent_mode_property =
+			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
+		if (!rdev->mode_info.coherent_mode_property)
+			return -ENOMEM;
+	}
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
+		rdev->mode_info.tmds_pll_property =
+			drm_property_create_enum(rdev->ddev, 0,
+					    "tmds_pll",
+					    radeon_tmds_pll_enum_list, sz);
+	}
+
+	rdev->mode_info.load_detect_property =
+		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+	if (!rdev->mode_info.load_detect_property)
+		return -ENOMEM;
+
+	drm_mode_create_scaling_mode_property(rdev->ddev);
+
+	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
+	rdev->mode_info.tv_std_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "tv standard",
+				    radeon_tv_std_enum_list, sz);
+
+	sz = ARRAY_SIZE(radeon_underscan_enum_list);
+	rdev->mode_info.underscan_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "underscan",
+				    radeon_underscan_enum_list, sz);
+
+	rdev->mode_info.underscan_hborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan hborder", 0, 128);
+	if (!rdev->mode_info.underscan_hborder_property)
+		return -ENOMEM;
+
+	rdev->mode_info.underscan_vborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan vborder", 0, 128);
+	if (!rdev->mode_info.underscan_vborder_property)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+	/* adjustment options for the display watermarks */
+	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+		/* set display priority to high for r3xx, rv515 chips
+		 * this avoids flickering due to underflow to the
+		 * display controllers during heavy acceleration.
+		 * Don't force high on rs4xx igp chips as it seems to
+		 * affect the sound card.  See kernel bug 15982.
+		 */
+		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+		    !(rdev->flags & RADEON_IS_IGP))
+			rdev->disp_priority = 2;
+		else
+			rdev->disp_priority = 0;
+	} else
+		rdev->disp_priority = radeon_disp_priority;
+}
+
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+		rdev->mode_info.afmt[i] = NULL;
+
+	if (ASIC_IS_DCE6(rdev)) {
+		/* todo */
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
+		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
+		rdev->mode_info.afmt[0] = malloc(sizeof(struct radeon_afmt),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		rdev->mode_info.afmt[1] = malloc(sizeof(struct radeon_afmt),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->mode_info.afmt[1]) {
+			rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			rdev->mode_info.afmt[1]->id = 1;
+		}
+		if (!ASIC_IS_DCE41(rdev)) {
+			rdev->mode_info.afmt[2] = malloc(sizeof(struct radeon_afmt),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->mode_info.afmt[2]) {
+				rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+				rdev->mode_info.afmt[2]->id = 2;
+			}
+			rdev->mode_info.afmt[3] = malloc(sizeof(struct radeon_afmt),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->mode_info.afmt[3]) {
+				rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+				rdev->mode_info.afmt[3]->id = 3;
+			}
+			rdev->mode_info.afmt[4] = malloc(sizeof(struct radeon_afmt),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->mode_info.afmt[4]) {
+				rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+				rdev->mode_info.afmt[4]->id = 4;
+			}
+			rdev->mode_info.afmt[5] = malloc(sizeof(struct radeon_afmt),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->mode_info.afmt[5]) {
+				rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+				rdev->mode_info.afmt[5]->id = 5;
+			}
+		}
+	} else if (ASIC_IS_DCE3(rdev)) {
+		/* DCE3.x has 2 audio blocks tied to DIG encoders */
+		rdev->mode_info.afmt[0] = malloc(sizeof(struct radeon_afmt),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		rdev->mode_info.afmt[1] = malloc(sizeof(struct radeon_afmt),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->mode_info.afmt[1]) {
+			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+			rdev->mode_info.afmt[1]->id = 1;
+		}
+	} else if (ASIC_IS_DCE2(rdev)) {
+		/* DCE2 has at least 1 routable audio block */
+		rdev->mode_info.afmt[0] = malloc(sizeof(struct radeon_afmt),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		/* r6xx has 2 routable audio blocks */
+		if (rdev->family >= CHIP_R600) {
+			rdev->mode_info.afmt[1] = malloc(sizeof(struct radeon_afmt),
+			    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+			if (rdev->mode_info.afmt[1]) {
+				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+				rdev->mode_info.afmt[1]->id = 1;
+			}
+		}
+	}
+}
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+		free(rdev->mode_info.afmt[i], DRM_MEM_DRIVER);
+		rdev->mode_info.afmt[i] = NULL;
+	}
+}
+
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(rdev->ddev);
+	rdev->mode_info.mode_config_initialized = true;
+
+	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->ddev->mode_config.max_width = 16384;
+		rdev->ddev->mode_config.max_height = 16384;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		rdev->ddev->mode_config.max_width = 8192;
+		rdev->ddev->mode_config.max_height = 8192;
+	} else {
+		rdev->ddev->mode_config.max_width = 4096;
+		rdev->ddev->mode_config.max_height = 4096;
+	}
+
+	rdev->ddev->mode_config.preferred_depth = 24;
+	rdev->ddev->mode_config.prefer_shadow = 1;
+
+	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+	ret = radeon_modeset_create_props(rdev);
+	if (ret) {
+		return ret;
+	}
+
+	/* init i2c buses */
+	radeon_i2c_init(rdev);
+
+	/* check combios for a valid hardcoded EDID - Sun servers */
+	if (!rdev->is_atom_bios) {
+		/* check for hardcoded EDID in BIOS */
+		radeon_combios_check_hardcoded_edid(rdev);
+	}
+
+	/* allocate crtcs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		radeon_crtc_init(rdev->ddev, i);
+	}
+
+	/* okay we should have all the bios connectors */
+	ret = radeon_setup_enc_conn(rdev->ddev);
+	if (!ret) {
+		return ret;
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+	}
+
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
+
+	/* setup afmt */
+	radeon_afmt_init(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);
+
+	return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+	radeon_fbdev_fini(rdev);
+	free(rdev->mode_info.bios_hardcoded_edid, DRM_MEM_KMS);
+	radeon_pm_fini(rdev);
+
+	if (rdev->mode_info.mode_config_initialized) {
+		radeon_afmt_fini(rdev);
+		drm_kms_helper_poll_fini(rdev->ddev);
+		radeon_hpd_fini(rdev);
+		drm_mode_config_cleanup(rdev->ddev);
+		rdev->mode_info.mode_config_initialized = false;
+	}
+	/* free i2c buses */
+	radeon_i2c_fini(rdev);
+}
+
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
+{
+	/* try and guess if this is a tv or a monitor */
+	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+	    (mode->vdisplay == 576) || /* 576p */
+	    (mode->vdisplay == 720) || /* 720p */
+	    (mode->vdisplay == 1080)) /* 1080p */
+		return true;
+	else
+		return false;
+}
+
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_encoder *radeon_encoder;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	bool first = true;
+	u32 src_v = 1, dst_v = 1;
+	u32 src_h = 1, dst_h = 1;
+
+	radeon_crtc->h_border = 0;
+	radeon_crtc->v_border = 0;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		radeon_encoder = to_radeon_encoder(encoder);
+		connector = radeon_get_connector_for_encoder(encoder);
+		radeon_connector = to_radeon_connector(connector);
+
+		if (first) {
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
+			memcpy(&radeon_crtc->native_mode,
+			       &radeon_encoder->native_mode,
+				sizeof(struct drm_display_mode));
+			src_v = crtc->mode.vdisplay;
+			dst_v = radeon_crtc->native_mode.vdisplay;
+			src_h = crtc->mode.hdisplay;
+			dst_h = radeon_crtc->native_mode.hdisplay;
+
+			/* fix up for overscan on hdmi */
+			if (ASIC_IS_AVIVO(rdev) &&
+			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+			      drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			      is_hdtv_mode(mode)))) {
+				if (radeon_encoder->underscan_hborder != 0)
+					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+				else
+					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+				if (radeon_encoder->underscan_vborder != 0)
+					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+				else
+					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+				radeon_crtc->rmx_type = RMX_FULL;
+				src_v = crtc->mode.vdisplay;
+				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+				src_h = crtc->mode.hdisplay;
+				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+			}
+			first = false;
+		} else {
+			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
+				/* WARNING: Right now this can't happen, but
+				 * in the future we need to check that scaling
+				 * is consistent across encoders (i.e. all
+				 * encoders can work with the same scaling).
+				 */
+				DRM_ERROR("Scaling not consistent across encoders.\n");
+				return false;
+			}
+		}
+	}
+	if (radeon_crtc->rmx_type != RMX_OFF) {
+		fixed20_12 a, b;
+		a.full = dfixed_const(src_v);
+		b.full = dfixed_const(dst_v);
+		radeon_crtc->vsc.full = dfixed_div(a, b);
+		a.full = dfixed_const(src_h);
+		b.full = dfixed_const(dst_h);
+		radeon_crtc->hsc.full = dfixed_div(a, b);
+	} else {
+		radeon_crtc->vsc.full = dfixed_const(1);
+		radeon_crtc->hsc.full = dfixed_const(1);
+	}
+	return true;
+}
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param dev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should be stored.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that the returned position may be offset by a constant
+ * but unknown small number of scanlines with respect to the real scanout
+ * position.
+ */
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+{
+	u32 stat_crtc = 0, vbl = 0, position = 0;
+	int vbl_start, vbl_end, vtotal, ret = 0;
+	bool in_vbl = true;
+
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		if (crtc == 0) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC0_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC0_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC1_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC1_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 2) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC2_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC2_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 3) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC3_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC3_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 4) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC4_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC4_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 5) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC5_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC5_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (crtc == 0) {
+			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else {
+		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+		if (crtc == 0) {
+			/* Assume vbl_end == 0, get vbl_start from
+			 * upper 16 bits.
+			 */
+			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	}
+
+	/* Decode into vertical and horizontal scanout position. */
+	*vpos = position & 0x1fff;
+	*hpos = (position >> 16) & 0x1fff;
+
+	/* Valid vblank area boundaries from gpu retrieved? */
+	if (vbl > 0) {
+		/* Yes: Decode. */
+		ret |= DRM_SCANOUTPOS_ACCURATE;
+		vbl_start = vbl & 0x1fff;
+		vbl_end = (vbl >> 16) & 0x1fff;
+	} else {
+		/* No: Fake something reasonable which gives at least OK results. */
+		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vbl_end = 0;
+	}
+
+	/* Test scanout position against vblank region. */
+	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+		in_vbl = false;
+
+	/* Check if inside vblank area and apply corrective offsets:
+	 * vpos will then be >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+
+	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
+	if (in_vbl && (*vpos >= vbl_start)) {
+		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+		*vpos = *vpos - vtotal;
+	}
+
+	/* Correct for shifted end of vbl at vbl_end. */
+	*vpos = *vpos - vbl_end;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
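+
+/* Usage sketch (illustrative only, error handling omitted): a caller
+ * checks the returned flags before trusting the position, e.g.
+ *
+ *	int vpos, hpos, flags;
+ *
+ *	flags = radeon_get_crtc_scanoutpos(dev, 0, &vpos, &hpos);
+ *	if ((flags & DRM_SCANOUTPOS_VALID) && (flags & DRM_SCANOUTPOS_INVBL))
+ *		DRM_DEBUG("crtc 0 in vblank, %d lines to scanout\n", -vpos);
+ */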


Property changes on: trunk/sys/dev/drm2/radeon/radeon_display.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_drm.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_drm.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_drm.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,986 @@
+/* $MidnightBSD$ */
+/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin at valinux.com>
+ *    Gareth Hughes <gareth at valinux.com>
+ *    Keith Whitwell <keith at tungstengraphics.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_drm.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+#include <dev/drm2/drm.h>
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/* Old style state flags, required for sarea interface (1.1 and 1.2
+ * clears) and 1.2 drm_vertex2 ioctl.
+ */
+#define RADEON_UPLOAD_CONTEXT		0x00000001
+#define RADEON_UPLOAD_VERTFMT		0x00000002
+#define RADEON_UPLOAD_LINE		0x00000004
+#define RADEON_UPLOAD_BUMPMAP		0x00000008
+#define RADEON_UPLOAD_MASKS		0x00000010
+#define RADEON_UPLOAD_VIEWPORT		0x00000020
+#define RADEON_UPLOAD_SETUP		0x00000040
+#define RADEON_UPLOAD_TCL		0x00000080
+#define RADEON_UPLOAD_MISC		0x00000100
+#define RADEON_UPLOAD_TEX0		0x00000200
+#define RADEON_UPLOAD_TEX1		0x00000400
+#define RADEON_UPLOAD_TEX2		0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES	0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES	0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES	0x00004000
+#define RADEON_UPLOAD_CLIPRECTS		0x00008000	/* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE	0x00010000
+#define RADEON_UPLOAD_ZBIAS		0x00020000	/* version 1.2 and newer */
+#define RADEON_UPLOAD_ALL		0x003effff
+#define RADEON_UPLOAD_CONTEXT_ALL       0x003e01ff
+
+/* New style per-packet identifiers for use in cmd_buffer ioctl with
+ * the RADEON_EMIT_PACKET command.  Comments relate new packets to old
+ * state bits and the packet size:
+ */
+#define RADEON_EMIT_PP_MISC                         0	/* context/7 */
+#define RADEON_EMIT_PP_CNTL                         1	/* context/3 */
+#define RADEON_EMIT_RB3D_COLORPITCH                 2	/* context/1 */
+#define RADEON_EMIT_RE_LINE_PATTERN                 3	/* line/2 */
+#define RADEON_EMIT_SE_LINE_WIDTH                   4	/* line/1 */
+#define RADEON_EMIT_PP_LUM_MATRIX                   5	/* bumpmap/1 */
+#define RADEON_EMIT_PP_ROT_MATRIX_0                 6	/* bumpmap/2 */
+#define RADEON_EMIT_RB3D_STENCILREFMASK             7	/* masks/3 */
+#define RADEON_EMIT_SE_VPORT_XSCALE                 8	/* viewport/6 */
+#define RADEON_EMIT_SE_CNTL                         9	/* setup/2 */
+#define RADEON_EMIT_SE_CNTL_STATUS                  10	/* setup/1 */
+#define RADEON_EMIT_RE_MISC                         11	/* misc/1 */
+#define RADEON_EMIT_PP_TXFILTER_0                   12	/* tex0/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_0               13	/* tex0/1 */
+#define RADEON_EMIT_PP_TXFILTER_1                   14	/* tex1/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_1               15	/* tex1/1 */
+#define RADEON_EMIT_PP_TXFILTER_2                   16	/* tex2/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_2               17	/* tex2/1 */
+#define RADEON_EMIT_SE_ZBIAS_FACTOR                 18	/* zbias/2 */
+#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT           19	/* tcl/11 */
+#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED   20	/* material/17 */
+#define R200_EMIT_PP_TXCBLEND_0                     21	/* tex0/4 */
+#define R200_EMIT_PP_TXCBLEND_1                     22	/* tex1/4 */
+#define R200_EMIT_PP_TXCBLEND_2                     23	/* tex2/4 */
+#define R200_EMIT_PP_TXCBLEND_3                     24	/* tex3/4 */
+#define R200_EMIT_PP_TXCBLEND_4                     25	/* tex4/4 */
+#define R200_EMIT_PP_TXCBLEND_5                     26	/* tex5/4 */
+#define R200_EMIT_PP_TXCBLEND_6                     27	/* /4 */
+#define R200_EMIT_PP_TXCBLEND_7                     28	/* /4 */
+#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0             29	/* tcl/7 */
+#define R200_EMIT_TFACTOR_0                         30	/* tf/7 */
+#define R200_EMIT_VTX_FMT_0                         31	/* vtx/5 */
+#define R200_EMIT_VAP_CTL                           32	/* vap/1 */
+#define R200_EMIT_MATRIX_SELECT_0                   33	/* msl/5 */
+#define R200_EMIT_TEX_PROC_CTL_2                    34	/* tcg/5 */
+#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL            35	/* tcl/1 */
+#define R200_EMIT_PP_TXFILTER_0                     36	/* tex0/6 */
+#define R200_EMIT_PP_TXFILTER_1                     37	/* tex1/6 */
+#define R200_EMIT_PP_TXFILTER_2                     38	/* tex2/6 */
+#define R200_EMIT_PP_TXFILTER_3                     39	/* tex3/6 */
+#define R200_EMIT_PP_TXFILTER_4                     40	/* tex4/6 */
+#define R200_EMIT_PP_TXFILTER_5                     41	/* tex5/6 */
+#define R200_EMIT_PP_TXOFFSET_0                     42	/* tex0/1 */
+#define R200_EMIT_PP_TXOFFSET_1                     43	/* tex1/1 */
+#define R200_EMIT_PP_TXOFFSET_2                     44	/* tex2/1 */
+#define R200_EMIT_PP_TXOFFSET_3                     45	/* tex3/1 */
+#define R200_EMIT_PP_TXOFFSET_4                     46	/* tex4/1 */
+#define R200_EMIT_PP_TXOFFSET_5                     47	/* tex5/1 */
+#define R200_EMIT_VTE_CNTL                          48	/* vte/1 */
+#define R200_EMIT_OUTPUT_VTX_COMP_SEL               49	/* vtx/1 */
+#define R200_EMIT_PP_TAM_DEBUG3                     50	/* tam/1 */
+#define R200_EMIT_PP_CNTL_X                         51	/* cst/1 */
+#define R200_EMIT_RB3D_DEPTHXY_OFFSET               52	/* cst/1 */
+#define R200_EMIT_RE_AUX_SCISSOR_CNTL               53	/* cst/1 */
+#define R200_EMIT_RE_SCISSOR_TL_0                   54	/* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_1                   55	/* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_2                   56	/* cst/2 */
+#define R200_EMIT_SE_VAP_CNTL_STATUS                57	/* cst/1 */
+#define R200_EMIT_SE_VTX_STATE_CNTL                 58	/* cst/1 */
+#define R200_EMIT_RE_POINTSIZE                      59	/* cst/1 */
+#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0       60	/* cst/4 */
+#define R200_EMIT_PP_CUBIC_FACES_0                  61
+#define R200_EMIT_PP_CUBIC_OFFSETS_0                62
+#define R200_EMIT_PP_CUBIC_FACES_1                  63
+#define R200_EMIT_PP_CUBIC_OFFSETS_1                64
+#define R200_EMIT_PP_CUBIC_FACES_2                  65
+#define R200_EMIT_PP_CUBIC_OFFSETS_2                66
+#define R200_EMIT_PP_CUBIC_FACES_3                  67
+#define R200_EMIT_PP_CUBIC_OFFSETS_3                68
+#define R200_EMIT_PP_CUBIC_FACES_4                  69
+#define R200_EMIT_PP_CUBIC_OFFSETS_4                70
+#define R200_EMIT_PP_CUBIC_FACES_5                  71
+#define R200_EMIT_PP_CUBIC_OFFSETS_5                72
+#define RADEON_EMIT_PP_TEX_SIZE_0                   73
+#define RADEON_EMIT_PP_TEX_SIZE_1                   74
+#define RADEON_EMIT_PP_TEX_SIZE_2                   75
+#define R200_EMIT_RB3D_BLENDCOLOR                   76
+#define R200_EMIT_TCL_POINT_SPRITE_CNTL             77
+#define RADEON_EMIT_PP_CUBIC_FACES_0                78
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0             79
+#define RADEON_EMIT_PP_CUBIC_FACES_1                80
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1             81
+#define RADEON_EMIT_PP_CUBIC_FACES_2                82
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2             83
+#define R200_EMIT_PP_TRI_PERF_CNTL                  84
+#define R200_EMIT_PP_AFS_0                          85
+#define R200_EMIT_PP_AFS_1                          86
+#define R200_EMIT_ATF_TFACTOR                       87
+#define R200_EMIT_PP_TXCTLALL_0                     88
+#define R200_EMIT_PP_TXCTLALL_1                     89
+#define R200_EMIT_PP_TXCTLALL_2                     90
+#define R200_EMIT_PP_TXCTLALL_3                     91
+#define R200_EMIT_PP_TXCTLALL_4                     92
+#define R200_EMIT_PP_TXCTLALL_5                     93
+#define R200_EMIT_VAP_PVS_CNTL                      94
+#define RADEON_MAX_STATE_PACKETS                    95
+
+/* Commands understood by cmd_buffer ioctl.  More can be added but
+ * obviously these can't be removed or changed:
+ */
+#define RADEON_CMD_PACKET      1	/* emit one of the register packets above */
+#define RADEON_CMD_SCALARS     2	/* emit scalar data */
+#define RADEON_CMD_VECTORS     3	/* emit vector data */
+#define RADEON_CMD_DMA_DISCARD 4	/* discard current dma buf */
+#define RADEON_CMD_PACKET3     5	/* emit hw packet */
+#define RADEON_CMD_PACKET3_CLIP 6	/* emit hw packet wrapped in cliprects */
+#define RADEON_CMD_SCALARS2     7	/* r200 stopgap */
+#define RADEON_CMD_WAIT         8	/* emit hw wait commands -- note:
+					 *  doesn't make the cpu wait, just
+					 *  the graphics hardware */
+#define RADEON_CMD_VECLINEAR	9       /* another r200 stopgap */
+
+typedef union {
+	int i;
+	struct {
+		unsigned char cmd_type, pad0, pad1, pad2;
+	} header;
+	struct {
+		unsigned char cmd_type, packet_id, pad0, pad1;
+	} packet;
+	struct {
+		unsigned char cmd_type, offset, stride, count;
+	} scalars;
+	struct {
+		unsigned char cmd_type, offset, stride, count;
+	} vectors;
+	struct {
+		unsigned char cmd_type, addr_lo, addr_hi, count;
+	} veclinear;
+	struct {
+		unsigned char cmd_type, buf_idx, pad0, pad1;
+	} dma;
+	struct {
+		unsigned char cmd_type, flags, pad0, pad1;
+	} wait;
+} drm_radeon_cmd_header_t;
+
+#define RADEON_WAIT_2D  0x1
+#define RADEON_WAIT_3D  0x2
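+
+/* Sketch (illustrative, not part of the interface): a client would emit a
+ * combined 2D+3D wait by filling in the header union above, e.g.
+ *
+ *	drm_radeon_cmd_header_t h;
+ *
+ *	h.i = 0;
+ *	h.wait.cmd_type = RADEON_CMD_WAIT;
+ *	h.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;
+ */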
+
+/* Allowed parameters for R300_CMD_PACKET3
+ */
+#define R300_CMD_PACKET3_CLEAR		0
+#define R300_CMD_PACKET3_RAW		1
+
+/* Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * and eventually reordered before stabilization.
+ */
+#define R300_CMD_PACKET0		1
+#define R300_CMD_VPU			2	/* emit vertex program upload */
+#define R300_CMD_PACKET3		3	/* emit a packet3 */
+#define R300_CMD_END3D			4	/* emit sequence ending 3d rendering */
+#define R300_CMD_CP_DELAY		5
+#define R300_CMD_DMA_DISCARD		6
+#define R300_CMD_WAIT			7
+#	define R300_WAIT_2D		0x1
+#	define R300_WAIT_3D		0x2
+/* These two defines are DOING IT WRONG - however,
+ * we have userspace which relies on using these.
+ * The wait interface is kept for backwards compatibility;
+ * new code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS.
+ */
+#	define R300_WAIT_2D_CLEAN	0x3
+#	define R300_WAIT_3D_CLEAN	0x4
+
+#	define R300_NEW_WAIT_2D_3D	0x3
+#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
+#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
+#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
+
+#define R300_CMD_SCRATCH		8
+#define R300_CMD_R500FP                 9
+
+typedef union {
+	unsigned int u;
+	struct {
+		unsigned char cmd_type, pad0, pad1, pad2;
+	} header;
+	struct {
+		unsigned char cmd_type, count, reglo, reghi;
+	} packet0;
+	struct {
+		unsigned char cmd_type, count, adrlo, adrhi;
+	} vpu;
+	struct {
+		unsigned char cmd_type, packet, pad0, pad1;
+	} packet3;
+	struct {
+		unsigned char cmd_type, packet;
+		unsigned short count;	/* amount of packet2 to emit */
+	} delay;
+	struct {
+		unsigned char cmd_type, buf_idx, pad0, pad1;
+	} dma;
+	struct {
+		unsigned char cmd_type, flags, pad0, pad1;
+	} wait;
+	struct {
+		unsigned char cmd_type, reg, n_bufs, flags;
+	} scratch;
+	struct {
+		unsigned char cmd_type, count, adrlo, adrhi_flags;
+	} r500fp;
+} drm_r300_cmd_header_t;
+
+#define RADEON_FRONT			0x1
+#define RADEON_BACK			0x2
+#define RADEON_DEPTH			0x4
+#define RADEON_STENCIL			0x8
+#define RADEON_CLEAR_FASTZ		0x80000000
+#define RADEON_USE_HIERZ		0x40000000
+#define RADEON_USE_COMP_ZBUF		0x20000000
+
+#define R500FP_CONSTANT_TYPE  (1 << 1)
+#define R500FP_CONSTANT_CLAMP (1 << 2)
+
+/* Primitive types
+ */
+#define RADEON_POINTS			0x1
+#define RADEON_LINES			0x2
+#define RADEON_LINE_STRIP		0x3
+#define RADEON_TRIANGLES		0x4
+#define RADEON_TRIANGLE_FAN		0x5
+#define RADEON_TRIANGLE_STRIP		0x6
+
+/* Vertex/indirect buffer size
+ */
+#define RADEON_BUFFER_SIZE		65536
+
+/* Byte offsets for indirect buffer data
+ */
+#define RADEON_INDEX_PRIM_OFFSET	20
+
+#define RADEON_SCRATCH_REG_OFFSET	32
+
+#define R600_SCRATCH_REG_OFFSET         256
+
+#define RADEON_NR_SAREA_CLIPRECTS	12
+
+/* There are 2 heaps (local/GART).  Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP		0
+#define RADEON_GART_TEX_HEAP		1
+#define RADEON_NR_TEX_HEAPS		2
+#define RADEON_NR_TEX_REGIONS		64
+#define RADEON_LOG_TEX_GRANULARITY	16
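+/* (1 << RADEON_LOG_TEX_GRANULARITY) is 64KB, which matches the minimum
+ * region size described in the comment above.
+ */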
+
+#define RADEON_MAX_TEXTURE_LEVELS	12
+#define RADEON_MAX_TEXTURE_UNITS	3
+
+#define RADEON_MAX_SURFACES		8
+
+/* Blits have strict offset rules.  All blit offset must be aligned on
+ * a 1K-byte boundary.
+ */
+#define RADEON_OFFSET_SHIFT             10
+#define RADEON_OFFSET_ALIGN             (1 << RADEON_OFFSET_SHIFT)
+#define RADEON_OFFSET_MASK              (RADEON_OFFSET_ALIGN - 1)
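+
+/* Illustrative use of the masks above: an offset is blit-aligned when
+ * (offset & RADEON_OFFSET_MASK) == 0, and
+ * (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK rounds an arbitrary
+ * offset up to the next 1K boundary.
+ */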
+
+#endif				/* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+	unsigned int red;
+	unsigned int green;
+	unsigned int blue;
+	unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+	/* Context state */
+	unsigned int pp_misc;	/* 0x1c14 */
+	unsigned int pp_fog_color;
+	unsigned int re_solid_color;
+	unsigned int rb3d_blendcntl;
+	unsigned int rb3d_depthoffset;
+	unsigned int rb3d_depthpitch;
+	unsigned int rb3d_zstencilcntl;
+
+	unsigned int pp_cntl;	/* 0x1c38 */
+	unsigned int rb3d_cntl;
+	unsigned int rb3d_coloroffset;
+	unsigned int re_width_height;
+	unsigned int rb3d_colorpitch;
+	unsigned int se_cntl;
+
+	/* Vertex format state */
+	unsigned int se_coord_fmt;	/* 0x1c50 */
+
+	/* Line state */
+	unsigned int re_line_pattern;	/* 0x1cd0 */
+	unsigned int re_line_state;
+
+	unsigned int se_line_width;	/* 0x1db8 */
+
+	/* Bumpmap state */
+	unsigned int pp_lum_matrix;	/* 0x1d00 */
+
+	unsigned int pp_rot_matrix_0;	/* 0x1d58 */
+	unsigned int pp_rot_matrix_1;
+
+	/* Mask state */
+	unsigned int rb3d_stencilrefmask;	/* 0x1d7c */
+	unsigned int rb3d_ropcntl;
+	unsigned int rb3d_planemask;
+
+	/* Viewport state */
+	unsigned int se_vport_xscale;	/* 0x1d98 */
+	unsigned int se_vport_xoffset;
+	unsigned int se_vport_yscale;
+	unsigned int se_vport_yoffset;
+	unsigned int se_vport_zscale;
+	unsigned int se_vport_zoffset;
+
+	/* Setup state */
+	unsigned int se_cntl_status;	/* 0x2140 */
+
+	/* Misc state */
+	unsigned int re_top_left;	/* 0x26c0 */
+	unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+typedef struct {
+	/* Zbias state */
+	unsigned int se_zbias_factor;	/* 0x1dac */
+	unsigned int se_zbias_constant;
+} drm_radeon_context2_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+	unsigned int pp_txfilter;
+	unsigned int pp_txformat;
+	unsigned int pp_txoffset;
+	unsigned int pp_txcblend;
+	unsigned int pp_txablend;
+	unsigned int pp_tfactor;
+	unsigned int pp_border_color;
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+	unsigned int start;
+	unsigned int finish;
+	unsigned int prim:8;
+	unsigned int stateidx:8;
+	unsigned int numverts:16;	/* overloaded as offset/64 for elt prims */
+	unsigned int vc_format;	/* vertex format */
+} drm_radeon_prim_t;
+
+typedef struct {
+	drm_radeon_context_regs_t context;
+	drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
+	drm_radeon_context2_regs_t context2;
+	unsigned int dirty;
+} drm_radeon_state_t;
+
+typedef struct {
+	/* The channel for communication of state information to the
+	 * kernel on firing a vertex buffer with either of the
+	 * obsoleted vertex/index ioctls.
+	 */
+	drm_radeon_context_regs_t context_state;
+	drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+	unsigned int dirty;
+	unsigned int vertsize;
+	unsigned int vc_format;
+
+	/* The current cliprects, or a subset thereof.
+	 */
+	struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
+	unsigned int nbox;
+
+	/* Counters for client-side throttling of rendering clients.
+	 */
+	unsigned int last_frame;
+	unsigned int last_dispatch;
+	unsigned int last_clear;
+
+	struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + 1];
+	unsigned int tex_age[RADEON_NR_TEX_HEAPS];
+	int ctx_owner;
+	int pfState;		/* number of 3d windows (0, 1, 2 or more) */
+	int pfCurrentPage;	/* which buffer is being displayed? */
+	int crtc2_base;		/* CRTC2 frame offset */
+	int tiling_enabled;	/* set by drm, read by 2d + 3d clients */
+} drm_radeon_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ *
+ * KW: actually it's illegal to change any of this (backwards compatibility).
+ */
+
+/* Radeon specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
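+/* (DRM_COMMAND_BASE is 0x40, so e.g. DRM_RADEON_CP_INIT below decodes to
+ * ioctl number 0x40 + 0x00 = 0x40.)
+ */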
+#define DRM_RADEON_CP_INIT    0x00
+#define DRM_RADEON_CP_START   0x01
+#define DRM_RADEON_CP_STOP    0x02
+#define DRM_RADEON_CP_RESET   0x03
+#define DRM_RADEON_CP_IDLE    0x04
+#define DRM_RADEON_RESET      0x05
+#define DRM_RADEON_FULLSCREEN 0x06
+#define DRM_RADEON_SWAP       0x07
+#define DRM_RADEON_CLEAR      0x08
+#define DRM_RADEON_VERTEX     0x09
+#define DRM_RADEON_INDICES    0x0A
+#define DRM_RADEON_NOT_USED
+#define DRM_RADEON_STIPPLE    0x0C
+#define DRM_RADEON_INDIRECT   0x0D
+#define DRM_RADEON_TEXTURE    0x0E
+#define DRM_RADEON_VERTEX2    0x0F
+#define DRM_RADEON_CMDBUF     0x10
+#define DRM_RADEON_GETPARAM   0x11
+#define DRM_RADEON_FLIP       0x12
+#define DRM_RADEON_ALLOC      0x13
+#define DRM_RADEON_FREE       0x14
+#define DRM_RADEON_INIT_HEAP  0x15
+#define DRM_RADEON_IRQ_EMIT   0x16
+#define DRM_RADEON_IRQ_WAIT   0x17
+#define DRM_RADEON_CP_RESUME  0x18
+#define DRM_RADEON_SETPARAM   0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE  0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO		0x1c
+#define DRM_RADEON_GEM_CREATE		0x1d
+#define DRM_RADEON_GEM_MMAP		0x1e
+#define DRM_RADEON_GEM_PREAD		0x21
+#define DRM_RADEON_GEM_PWRITE		0x22
+#define DRM_RADEON_GEM_SET_DOMAIN	0x23
+#define DRM_RADEON_GEM_WAIT_IDLE	0x24
+#define DRM_RADEON_CS			0x26
+#define DRM_RADEON_INFO			0x27
+#define DRM_RADEON_GEM_SET_TILING	0x28
+#define DRM_RADEON_GEM_GET_TILING	0x29
+#define DRM_RADEON_GEM_BUSY		0x2a
+#define DRM_RADEON_GEM_VA		0x2b
+
+#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+#define DRM_IOCTL_RADEON_CP_STOP    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
+#define DRM_IOCTL_RADEON_CP_IDLE    DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
+#define DRM_IOCTL_RADEON_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_RESET)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_SWAP)
+#define DRM_IOCTL_RADEON_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
+#define DRM_IOCTL_RADEON_VERTEX2    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
+#define DRM_IOCTL_RADEON_CMDBUF     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
+#define DRM_IOCTL_RADEON_GETPARAM   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
+#define DRM_IOCTL_RADEON_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_FLIP)
+#define DRM_IOCTL_RADEON_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE       DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
+#define DRM_IOCTL_RADEON_CP_RESUME  DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
+#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+
+typedef struct drm_radeon_init {
+	enum {
+		RADEON_INIT_CP = 0x01,
+		RADEON_CLEANUP_CP = 0x02,
+		RADEON_INIT_R200_CP = 0x03,
+		RADEON_INIT_R300_CP = 0x04,
+		RADEON_INIT_R600_CP = 0x05
+	} func;
+	unsigned long sarea_priv_offset;
+	int is_pci;
+	int cp_mode;
+	int gart_size;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	unsigned long fb_offset;
+	unsigned long mmio_offset;
+	unsigned long ring_offset;
+	unsigned long ring_rptr_offset;
+	unsigned long buffers_offset;
+	unsigned long gart_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+	int flush;
+	int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+	enum {
+		RADEON_INIT_FULLSCREEN = 0x01,
+		RADEON_CLEANUP_FULLSCREEN = 0x02
+	} func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1	0
+#define CLEAR_Y1	1
+#define CLEAR_X2	2
+#define CLEAR_Y2	3
+#define CLEAR_DEPTH	4
+
+typedef union drm_radeon_clear_rect {
+	float f[5];
+	unsigned int ui[5];
+} drm_radeon_clear_rect_t;
+
+typedef struct drm_radeon_clear {
+	unsigned int flags;
+	unsigned int clear_color;
+	unsigned int clear_depth;
+	unsigned int color_mask;
+	unsigned int depth_mask;	/* misnamed field: should be stencil */
+	drm_radeon_clear_rect_t __user *depth_boxes;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+	int prim;
+	int idx;		/* Index of vertex buffer */
+	int count;		/* Number of vertices in buffer */
+	int discard;		/* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+	int prim;
+	int idx;
+	int start;
+	int end;
+	int discard;		/* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
+ *      - allows multiple primitives and state changes in a single ioctl
+ *      - supports driver change to emit native primitives
+ */
+typedef struct drm_radeon_vertex2 {
+	int idx;		/* Index of vertex buffer */
+	int discard;		/* Client finished with buffer? */
+	int nr_states;
+	drm_radeon_state_t __user *state;
+	int nr_prims;
+	drm_radeon_prim_t __user *prim;
+} drm_radeon_vertex2_t;
+
+/* v1.3 - obsoletes drm_radeon_vertex2
+ *      - allows arbitrarily large cliprect list
+ *      - allows updating of tcl packet, vector and scalar state
+ *      - allows memory-efficient description of state updates
+ *      - allows state to be emitted without a primitive
+ *           (for clears, ctx switches)
+ *      - allows more than one dma buffer to be referenced per ioctl
+ *      - supports tcl driver
+ *      - may be extended in future versions with new cmd types, packets
+ */
+typedef struct drm_radeon_cmd_buffer {
+	int bufsz;
+	char __user *buf;
+	int nbox;
+	struct drm_clip_rect __user *boxes;
+} drm_radeon_cmd_buffer_t;
+
+typedef struct drm_radeon_tex_image {
+	unsigned int x, y;	/* Blit coordinates */
+	unsigned int width, height;
+	const void __user *data;
+} drm_radeon_tex_image_t;
+
+typedef struct drm_radeon_texture {
+	unsigned int offset;
+	int pitch;
+	int format;
+	int width;		/* Texture image coordinates */
+	int height;
+	drm_radeon_tex_image_t __user *image;
+} drm_radeon_texture_t;
+
+typedef struct drm_radeon_stipple {
+	unsigned int __user *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+	int idx;
+	int start;
+	int end;
+	int discard;
+} drm_radeon_indirect_t;
+
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
+/* 1.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define RADEON_PARAM_GART_BUFFER_OFFSET    1	/* card offset of 1st GART buffer */
+#define RADEON_PARAM_LAST_FRAME            2
+#define RADEON_PARAM_LAST_DISPATCH         3
+#define RADEON_PARAM_LAST_CLEAR            4
+/* Added with DRM version 1.6. */
+#define RADEON_PARAM_IRQ_NR                5
+#define RADEON_PARAM_GART_BASE             6	/* card offset of GART base */
+/* Added with DRM version 1.8. */
+#define RADEON_PARAM_REGISTER_HANDLE       7	/* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE         8
+#define RADEON_PARAM_SAREA_HANDLE          9
+#define RADEON_PARAM_GART_TEX_HANDLE       10
+#define RADEON_PARAM_SCRATCH_OFFSET        11
+#define RADEON_PARAM_CARD_TYPE             12
+#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
+#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID             16
+#define RADEON_PARAM_NUM_Z_PIPES           17   /* num Z pipes */
+
+typedef struct drm_radeon_getparam {
+	int param;
+	void __user *value;
+} drm_radeon_getparam_t;
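/*
 * Illustrative sketch, not part of the committed file: a userspace
 * client querying a parameter through DRM_IOCTL_RADEON_GETPARAM.
 * Assumes an already-open DRM fd and <sys/ioctl.h>; the kernel writes
 * the result back through the value pointer.
 */
static int radeon_getparam_example(int fd, int param, int *out)
{
	drm_radeon_getparam_t gp;

	gp.param = param;	/* e.g. RADEON_PARAM_DEVICE_ID */
	gp.value = out;
	return ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp);
}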
+
+/* 1.6: Set up a memory manager for regions of shared memory:
+ */
+#define RADEON_MEM_REGION_GART 1
+#define RADEON_MEM_REGION_FB   2
+
+typedef struct drm_radeon_mem_alloc {
+	int region;
+	int alignment;
+	int size;
+	int __user *region_offset;	/* offset from start of fb or GART */
+} drm_radeon_mem_alloc_t;
+
+typedef struct drm_radeon_mem_free {
+	int region;
+	int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+	int region;
+	int size;
+	int start;
+} drm_radeon_mem_init_heap_t;
+
+/* 1.6: Userspace can request & wait on irq's:
+ */
+typedef struct drm_radeon_irq_emit {
+	int __user *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+	int irq_seq;
+} drm_radeon_irq_wait_t;
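/*
 * Illustrative sketch, not part of the committed file: the intended
 * emit/wait pairing. IRQ_EMIT hands back a sequence number through
 * irq_seq; IRQ_WAIT then blocks until the GPU retires it (fd assumed).
 */
static int radeon_irq_fence_example(int fd)
{
	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit) != 0)
		return -1;

	wait.irq_seq = seq;
	return ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
}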
+
+/* 1.10: Clients tell the DRM where they think the framebuffer is located in
+ * the card's address space, via a new generic ioctl to set parameters
+ */
+
+typedef struct drm_radeon_setparam {
+	unsigned int param;
+	__s64 value;
+} drm_radeon_setparam_t;
+
+#define RADEON_SETPARAM_FB_LOCATION    1	/* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING  2	/* enable/disable color tiling */
+#define RADEON_SETPARAM_PCIGART_LOCATION 3	/* PCI Gart Location */
+#define RADEON_SETPARAM_NEW_MEMMAP 4		/* Use new memory map */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
+#define RADEON_SETPARAM_VBLANK_CRTC 6           /* VBLANK CRTC */
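/*
 * Illustrative sketch, not part of the committed file: a 1.10-style
 * client telling the DRM where it believes the framebuffer is located
 * (fd and fb_location are assumed caller-supplied).
 */
static int radeon_setparam_example(int fd, int64_t fb_location)
{
	drm_radeon_setparam_t sp;

	sp.param = RADEON_SETPARAM_FB_LOCATION;
	sp.value = fb_location;
	return ioctl(fd, DRM_IOCTL_RADEON_SETPARAM, &sp);
}
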
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+	unsigned int address;
+	unsigned int size;
+	unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+	unsigned int address;
+} drm_radeon_surface_free_t;
+
+#define	DRM_RADEON_VBLANK_CRTC1		1
+#define	DRM_RADEON_VBLANK_CRTC2		2
+
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU		0x1
+#define RADEON_GEM_DOMAIN_GTT		0x2
+#define RADEON_GEM_DOMAIN_VRAM		0x4
+
+struct drm_radeon_gem_info {
+	uint64_t	gart_size;
+	uint64_t	vram_size;
+	uint64_t	vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+	uint64_t	size;
+	uint64_t	alignment;
+	uint32_t	handle;
+	uint32_t	initial_domain;
+	uint32_t	flags;
+};
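/*
 * Illustrative sketch, not part of the committed file: allocating a GEM
 * buffer object in VRAM; the kernel fills in args.handle on success.
 * fd and size are assumed caller-supplied.
 */
static int radeon_gem_create_example(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create args = { 0 };

	args.size = size;
	args.alignment = 4096;	/* illustrative page alignment */
	args.initial_domain = RADEON_GEM_DOMAIN_VRAM;
	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args) != 0)
		return -1;
	*handle = args.handle;
	return 0;
}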
+
+#define RADEON_TILING_MACRO				0x1
+#define RADEON_TILING_MICRO				0x2
+#define RADEON_TILING_SWAP_16BIT			0x4
+#define RADEON_TILING_SWAP_32BIT			0x8
+/* this object requires a surface when mapped - i.e. front buffer */
+#define RADEON_TILING_SURFACE				0x10
+#define RADEON_TILING_MICRO_SQUARE			0x20
+#define RADEON_TILING_EG_BANKW_SHIFT			8
+#define RADEON_TILING_EG_BANKW_MASK			0xf
+#define RADEON_TILING_EG_BANKH_SHIFT			12
+#define RADEON_TILING_EG_BANKH_MASK			0xf
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT	16
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK		0xf
+#define RADEON_TILING_EG_TILE_SPLIT_SHIFT		24
+#define RADEON_TILING_EG_TILE_SPLIT_MASK		0xf
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT	28
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK	0xf
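/*
 * Illustrative note, not part of the committed file: the EG_* values
 * above are shift/mask pairs for bit fields packed into tiling_flags.
 * A hypothetical packing of a bank-width value would look like:
 *
 *   tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK)
 *                       << RADEON_TILING_EG_BANKW_SHIFT;
 */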
+
+struct drm_radeon_gem_set_tiling {
+	uint32_t	handle;
+	uint32_t	tiling_flags;
+	uint32_t	pitch;
+};
+
+struct drm_radeon_gem_get_tiling {
+	uint32_t	handle;
+	uint32_t	tiling_flags;
+	uint32_t	pitch;
+};
+
+struct drm_radeon_gem_mmap {
+	uint32_t	handle;
+	uint32_t	pad;
+	uint64_t	offset;
+	uint64_t	size;
+	uint64_t	addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+	uint32_t	handle;
+	uint32_t	read_domains;
+	uint32_t	write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+	uint32_t	handle;
+	uint32_t	pad;
+};
+
+struct drm_radeon_gem_busy {
+	uint32_t	handle;
+	uint32_t        domain;
+};
+
+struct drm_radeon_gem_pread {
+	/** Handle for the object being read. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to read from */
+	uint64_t offset;
+	/** Length of data to read */
+	uint64_t size;
+	/** Pointer to write the data into. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+	/** Handle for the object being written to. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to write to */
+	uint64_t offset;
+	/** Length of data to write */
+	uint64_t size;
+	/** Pointer to read the data from. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
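/*
 * Illustrative note, not part of the committed file: data_ptr is a plain
 * uint64_t so the struct layout is identical for 32- and 64-bit clients;
 * a caller stores its buffer address with an explicit cast, e.g.:
 *
 *   pw.data_ptr = (uint64_t)(uintptr_t)buf;
 */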
+
+#define RADEON_VA_MAP			1
+#define RADEON_VA_UNMAP			2
+
+#define RADEON_VA_RESULT_OK		0
+#define RADEON_VA_RESULT_ERROR		1
+#define RADEON_VA_RESULT_VA_EXIST	2
+
+#define RADEON_VM_PAGE_VALID		(1 << 0)
+#define RADEON_VM_PAGE_READABLE		(1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE	(1 << 2)
+#define RADEON_VM_PAGE_SYSTEM		(1 << 3)
+#define RADEON_VM_PAGE_SNOOPED		(1 << 4)
+
+struct drm_radeon_gem_va {
+	uint32_t		handle;
+	uint32_t		operation;
+	uint32_t		vm_id;
+	uint32_t		flags;
+	uint64_t		offset;
+};
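/*
 * Illustrative sketch, not part of the committed file: mapping a GEM
 * object into a per-process virtual address space. fd, handle, vm_id
 * and gpu_va are assumed caller-supplied; <sys/ioctl.h> and <stdint.h>
 * assumed included. As read here, the kernel reports a RADEON_VA_RESULT_*
 * code back through the operation field.
 */
static int radeon_gem_va_map_example(int fd, uint32_t handle,
				     uint32_t vm_id, uint64_t gpu_va)
{
	struct drm_radeon_gem_va args = { 0 };

	args.handle = handle;
	args.operation = RADEON_VA_MAP;
	args.vm_id = vm_id;
	args.flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
	    RADEON_VM_PAGE_WRITEABLE;
	args.offset = gpu_va;
	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_VA, &args) != 0)
		return -1;
	return (args.operation == RADEON_VA_RESULT_OK) ? 0 : -1;
}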
+
+#define RADEON_CHUNK_ID_RELOCS	0x01
+#define RADEON_CHUNK_ID_IB	0x02
+#define RADEON_CHUNK_ID_FLAGS	0x03
+#define RADEON_CHUNK_ID_CONST_IB	0x04
+
+/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 bitmask of these flags: */
+#define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace indicating which CS is the last one */
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX          0
+#define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a signed 32-bit value that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
+
+struct drm_radeon_cs_chunk {
+	uint32_t		chunk_id;
+	uint32_t		length_dw;
+	uint64_t		chunk_data;
+};
+
+/* drm_radeon_cs_reloc.flags */
+
+struct drm_radeon_cs_reloc {
+	uint32_t		handle;
+	uint32_t		read_domains;
+	uint32_t		write_domain;
+	uint32_t		flags;
+};
+
+struct drm_radeon_cs {
+	uint32_t		num_chunks;
+	uint32_t		cs_id;
+	/* this points to an array of uint64_t, each pointing to a CS chunk */
+	uint64_t		chunks;
+	/* updates to the limits after this CS ioctl */
+	uint64_t		gart_limit;
+	uint64_t		vram_limit;
+};
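/*
 * Illustrative sketch, not part of the committed file: a minimal
 * single-chunk command submission. The CS ioctl takes a user pointer to
 * an array of uint64_t, each holding the address of one chunk;
 * ib_words/ib_len_dw are assumed caller-provided.
 */
static int radeon_cs_submit_example(int fd, uint32_t *ib_words,
				    uint32_t ib_len_dw)
{
	struct drm_radeon_cs_chunk chunk = { 0 };
	uint64_t chunk_array[1];
	struct drm_radeon_cs cs = { 0 };

	chunk.chunk_id = RADEON_CHUNK_ID_IB;
	chunk.length_dw = ib_len_dw;
	chunk.chunk_data = (uint64_t)(uintptr_t)ib_words;

	chunk_array[0] = (uint64_t)(uintptr_t)&chunk;
	cs.num_chunks = 1;
	cs.chunks = (uint64_t)(uintptr_t)chunk_array;
	return ioctl(fd, DRM_IOCTL_RADEON_CS, &cs);
}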
+
+#define RADEON_INFO_DEVICE_ID		0x00
+#define RADEON_INFO_NUM_GB_PIPES	0x01
+#define RADEON_INFO_NUM_Z_PIPES 	0x02
+#define RADEON_INFO_ACCEL_WORKING	0x03
+#define RADEON_INFO_CRTC_FROM_ID	0x04
+#define RADEON_INFO_ACCEL_WORKING2	0x05
+#define RADEON_INFO_TILING_CONFIG	0x06
+#define RADEON_INFO_WANT_HYPERZ		0x07
+#define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ	0x09 /* clock crystal frequency */
+#define RADEON_INFO_NUM_BACKENDS	0x0a /* DB/backends for r600+ - need for OQ */
+#define RADEON_INFO_NUM_TILE_PIPES	0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING	0x0c /* fusion writes to GTT were broken before this */
+#define RADEON_INFO_BACKEND_MAP		0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START		0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE	0x0f
+/* max pipes - needed for compute shaders */
+#define RADEON_INFO_MAX_PIPES		0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP		0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE		0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE	0x13
+
+struct drm_radeon_info {
+	uint32_t		request;
+	uint32_t		pad;
+	uint64_t		value;
+};
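/*
 * Illustrative sketch, not part of the committed file: RADEON_INFO
 * requests return their result through the address stored in value,
 * e.g. reading the GPU clock via RADEON_INFO_TIMESTAMP (fd assumed).
 */
static int radeon_info_timestamp_example(int fd, uint64_t *ts)
{
	struct drm_radeon_info info = { 0 };

	info.request = RADEON_INFO_TIMESTAMP;
	info.value = (uint64_t)(uintptr_t)ts;	/* kernel writes through this */
	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}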
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_drm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_drv.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_drv.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_drv.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,404 @@
+/* $MidnightBSD$ */
+/**
+ * \file radeon_drv.c
+ * ATI Radeon driver
+ *
+ * \author Gareth Hughes <gareth at valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_drv.c 317903 2017-05-07 11:11:51Z nyan $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+#include "radeon_gem.h"
+#include "radeon_kms.h"
+#include "radeon_irq_kms.h"
+
+#include <dev/drm2/drm_pciids.h>
+
+#include "fb_if.h"
+
+/*
+ * KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support (no external ABI change), add eg dyn gpr regs
+ * - 2.8.0 - pageflip support, r500 US_FORMAT regs, r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * - 2.9.0 - r600 tiling (s3tc, rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
+ * - 2.10.0 - fusion 2D tiling
+ * - 2.11.0 - backend map, initial compute support for the CS checker
+ * - 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * - 2.13.0 - virtual memory support, streamout
+ * - 2.14.0 - add evergreen tiling information
+ * - 2.15.0 - add max_pipes query
+ * - 2.16.0 - fix evergreen 2D tiled surface calculation
+ * - 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * - 2.18.0 - r600-eg: allow "invalid" DB formats
+ * - 2.19.0 - r600-eg: MSAA textures
+ * - 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ * - 2.21.0 - r600-r700: FMASK and CMASK
+ * - 2.22.0 - r600 only: RESOLVE_BOX allowed
+ * - 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ * - 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * - 2.25.0 - eg+: new info request for num SE and num SH
+ * - 2.26.0 - r600-eg: fix htile size computation
+ * - 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * - 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * - 2.29.0 - R500 FP16 color clear registers
+ */
+#define KMS_DRIVER_MAJOR	2
+#define KMS_DRIVER_MINOR	29
+#define KMS_DRIVER_PATCHLEVEL	0
+int radeon_suspend_kms(struct drm_device *dev);
+int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+#ifdef FREEBSD_WIP
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+#endif /* FREEBSD_WIP */
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *obj,
+					int flags);
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf);
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor);
+void radeon_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+
+int radeon_no_wb;
+int radeon_modeset = 1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_testing = 0;
+int radeon_connector_table = 0;
+int radeon_tv = 1;
+int radeon_audio = 0;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = -1;
+int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
+
+TUNABLE_INT("drm.radeon.no_wb", &radeon_no_wb);
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+TUNABLE_INT("drm.radeon.modeset", &radeon_modeset);
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+TUNABLE_INT("drm.radeon.dynclks", &radeon_dynclks);
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+TUNABLE_INT("drm.radeon.r4xx_atom", &radeon_r4xx_atom);
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+TUNABLE_INT("drm.radeon.vramlimit", &radeon_vram_limit);
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+TUNABLE_INT("drm.radeon.agpmode", &radeon_agpmode);
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+TUNABLE_INT("drm.radeon.gartsize", &radeon_gart_size);
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to set up in megabytes (32, 64, etc.)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+TUNABLE_INT("drm.radeon.benchmark", &radeon_benchmarking);
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+TUNABLE_INT("drm.radeon.test", &radeon_testing);
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
+TUNABLE_INT("drm.radeon.connector_table", &radeon_connector_table);
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+
+TUNABLE_INT("drm.radeon.tv", &radeon_tv);
+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+module_param_named(tv, radeon_tv, int, 0444);
+
+TUNABLE_INT("drm.radeon.audio", &radeon_audio);
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
+TUNABLE_INT("drm.radeon.disp_priority", &radeon_disp_priority);
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+TUNABLE_INT("drm.radeon.hw_i2c", &radeon_hw_i2c);
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
+TUNABLE_INT("drm.radeon.pcie_gen2", &radeon_pcie_gen2);
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
+TUNABLE_INT("drm.radeon.msi", &radeon_msi);
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
+TUNABLE_INT("drm.radeon.lockup_timeout", &radeon_lockup_timeout);
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
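/*
 * Illustrative note, not part of the committed file: each TUNABLE_INT
 * above is a loader tunable, so on MidnightBSD/FreeBSD these knobs can
 * be set from /boot/loader.conf before the module loads, e.g.:
 *
 *   drm.radeon.msi=0
 *   drm.radeon.gartsize=64
 */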
+
+static drm_pci_id_list_t pciidlist[] = {
+	radeon_PCI_IDS
+};
+
+static struct drm_driver kms_driver;
+
+static int radeon_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
+			      struct sysctl_oid *top)
+{
+	return drm_add_busid_modesetting(dev, ctx, top);
+}
+
+static struct drm_driver kms_driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+	    DRIVER_PRIME,
+#ifdef FREEBSD_WIP
+	.dev_priv_size = 0,
+#endif /* FREEBSD_WIP */
+	.load = radeon_driver_load_kms,
+	.firstopen = radeon_driver_firstopen_kms,
+	.open = radeon_driver_open_kms,
+	.preclose = radeon_driver_preclose_kms,
+	.postclose = radeon_driver_postclose_kms,
+	.lastclose = radeon_driver_lastclose_kms,
+	.unload = radeon_driver_unload_kms,
+#ifdef FREEBSD_WIP
+	.suspend = radeon_suspend_kms,
+	.resume = radeon_resume_kms,
+#endif /* FREEBSD_WIP */
+	.get_vblank_counter = radeon_get_vblank_counter_kms,
+	.enable_vblank = radeon_enable_vblank_kms,
+	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+	.get_scanout_position = radeon_get_crtc_scanoutpos,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = radeon_debugfs_init,
+	.debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+	.irq_preinstall = radeon_driver_irq_preinstall_kms,
+	.irq_postinstall = radeon_driver_irq_postinstall_kms,
+	.irq_uninstall = radeon_driver_irq_uninstall_kms,
+	.irq_handler = radeon_driver_irq_handler_kms,
+	.sysctl_init = radeon_sysctl_init,
+	.ioctls = radeon_ioctls_kms,
+	.gem_init_object = radeon_gem_object_init,
+	.gem_free_object = radeon_gem_object_free,
+	.gem_open_object = radeon_gem_object_open,
+	.gem_close_object = radeon_gem_object_close,
+	.dma_ioctl = radeon_dma_ioctl_kms,
+	.dumb_create = radeon_mode_dumb_create,
+	.dumb_map_offset = radeon_mode_dumb_mmap,
+	.dumb_destroy = radeon_mode_dumb_destroy,
+#ifdef FREEBSD_WIP
+	.fops = &radeon_driver_kms_fops,
+#endif /* FREEBSD_WIP */
+
+#ifdef FREEBSD_WIP
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = radeon_gem_prime_export,
+	.gem_prime_import = radeon_gem_prime_import,
+#endif /* FREEBSD_WIP */
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = KMS_DRIVER_MAJOR,
+	.minor = KMS_DRIVER_MINOR,
+	.patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+#ifdef FREEBSD_WIP
+static int __init radeon_init(void)
+{
+	driver = &driver_old;
+	pdriver = &radeon_pci_driver;
+	driver->num_ioctls = radeon_max_ioctl;
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+		driver = &driver_old;
+		pdriver = &radeon_pci_driver;
+		driver->driver_features &= ~DRIVER_MODESET;
+		radeon_modeset = 0;
+	}
+#endif
+	/* modeset left at -1: fall back to the compile-time default */
+	if (radeon_modeset == -1) {
+#ifdef CONFIG_DRM_RADEON_KMS
+		DRM_INFO("radeon defaulting to kernel modesetting.\n");
+		radeon_modeset = 1;
+#else
+		DRM_INFO("radeon defaulting to userspace modesetting.\n");
+		radeon_modeset = 0;
+#endif
+	}
+	if (radeon_modeset == 1) {
+		DRM_INFO("radeon kernel modesetting enabled.\n");
+		driver = &kms_driver;
+		pdriver = &radeon_kms_pci_driver;
+		driver->driver_features |= DRIVER_MODESET;
+		driver->num_ioctls = radeon_max_kms_ioctl;
+		radeon_register_atpx_handler();
+	}
+	/* if the vga console setting is enabled, still
+	 * let modprobe override it */
+	return drm_pci_init(driver, pdriver);
+}
+
+static void __exit radeon_exit(void)
+{
+	drm_pci_exit(driver, pdriver);
+	radeon_unregister_atpx_handler();
+}
+#endif /* FREEBSD_WIP */
+
+/* =================================================================== */
+
+static int
+radeon_probe(device_t kdev)
+{
+
+	return (-drm_probe_helper(kdev, pciidlist));
+}
+
+static int
+radeon_attach(device_t kdev)
+{
+
+	if (radeon_modeset == 1) {
+		kms_driver.driver_features |= DRIVER_MODESET;
+		kms_driver.num_ioctls = radeon_max_kms_ioctl;
+#ifdef COMPAT_FREEBSD32
+		kms_driver.compat_ioctls = radeon_compat_ioctls;
+		kms_driver.num_compat_ioctls = &radeon_num_compat_ioctls;
+#endif
+		radeon_register_atpx_handler();
+	}
+	return (-drm_attach_helper(kdev, pciidlist, &kms_driver));
+}
+
+static int
+radeon_suspend(device_t kdev)
+{
+	struct drm_device *dev;
+	int ret;
+
+	dev = device_get_softc(kdev);
+	ret = radeon_suspend_kms(dev);
+	if (ret)
+		return (-ret);
+
+	ret = bus_generic_suspend(kdev);
+
+	return (ret);
+}
+
+static int
+radeon_resume(device_t kdev)
+{
+	struct drm_device *dev;
+	int ret;
+
+	dev = device_get_softc(kdev);
+	ret = radeon_resume_kms(dev);
+	if (ret)
+		return (-ret);
+
+	ret = bus_generic_resume(kdev);
+
+	return (ret);
+}
+
+extern struct fb_info *radeon_fb_helper_getinfo(device_t kdev);
+
+static device_method_t radeon_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		radeon_probe),
+	DEVMETHOD(device_attach,	radeon_attach),
+	DEVMETHOD(device_suspend,	radeon_suspend),
+	DEVMETHOD(device_resume,	radeon_resume),
+	DEVMETHOD(device_detach,	drm_generic_detach),
+
+	/* Framebuffer service methods */
+	DEVMETHOD(fb_getinfo,		radeon_fb_helper_getinfo),
+
+	DEVMETHOD_END
+};
+
+static driver_t radeon_driver = {
+	"drmn",
+	radeon_methods,
+	sizeof(struct drm_device)
+};
+
+extern devclass_t drm_devclass;
+DRIVER_MODULE_ORDERED(radeonkms, vgapci, radeon_driver, drm_devclass,
+    NULL, NULL, SI_ORDER_ANY);
+MODULE_DEPEND(radeonkms, drmn, 1, 1, 1);
+MODULE_DEPEND(radeonkms, agp, 1, 1, 1);
+MODULE_DEPEND(radeonkms, iicbus, 1, 1, 1);
+MODULE_DEPEND(radeonkms, iic, 1, 1, 1);
+MODULE_DEPEND(radeonkms, iicbb, 1, 1, 1);
+MODULE_DEPEND(radeonkms, firmware, 1, 1, 1);


Property changes on: trunk/sys/dev/drm2/radeon/radeon_drv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_drv.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_drv.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_drv.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,2175 @@
+/* $MidnightBSD$ */
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin at valinux.com>
+ *    Gareth Hughes <gareth at valinux.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_drv.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+#include "radeon_family.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME		"radeon"
+#define DRIVER_DESC		"ATI Radeon"
+#define DRIVER_DATE		"20080528"
+
+/* Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ *     - Add stencil capability to clear ioctl (gareth, keith)
+ *     - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ *     - Add support for new radeon packets (keith)
+ *     - Add getparam ioctl (keith)
+ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ *     - Add r200 function to init ioctl
+ *     - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ *       Add irq handler (won't be turned on unless X server knows to)
+ *       Add irq ioctls and irq_active getparam.
+ *       Add wait command for cmdbuf ioctl
+ *       Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ *       Add 'GET' queries for starting additional clients on different VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ *       Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ *       clients use to tell the DRM where they think the framebuffer is
+ *       located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ *       and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ *       (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ *     - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ *     - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ *     - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
+ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ *       new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
+ * 1.29- R500 3D cmd buffer support
+ * 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		33
+#define DRIVER_PATCHLEVEL	0
+
+enum radeon_cp_microcode_version {
+	UCODE_R100,
+	UCODE_R200,
+	UCODE_R300,
+};
+
+typedef struct drm_radeon_freelist {
+	unsigned int age;
+	struct drm_buf *buf;
+	struct drm_radeon_freelist *next;
+	struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	int rptr_update; /* Double Words */
+	int rptr_update_l2qw; /* log2 Quad Words */
+
+	int fetch_size; /* Double Words */
+	int fetch_size_l2ow; /* log2 Oct Words */
+
+	u32 tail;
+	u32 tail_mask;
+	int space;
+
+	int high_mark;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+	u32 rb3d_cntl;
+	u32 rb3d_zstencilcntl;
+	u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+struct drm_radeon_driver_file_fields {
+	int64_t radeon_fb_delta;
+};
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	int start;
+	int size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct radeon_surface {
+	int refcount;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+};
+
+struct radeon_virt_surface {
+	int surface_index;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+	struct drm_file *file_priv;
+#define PCIGART_FILE_PRIV	((void *) -1L)
+};
+
+#define RADEON_FLUSH_EMITED	(1 << 0)
+#define RADEON_PURGE_EMITED	(1 << 1)
+
+struct drm_radeon_master_private {
+	drm_local_map_t *sarea;
+	drm_radeon_sarea_t *sarea_priv;
+};
+
+typedef struct drm_radeon_private {
+	drm_radeon_ring_buffer_t ring;
+
+	u32 fb_location;
+	u32 fb_size;
+	int new_memmap;
+
+	int gart_size;
+	u32 gart_vm_start;
+	unsigned long gart_buffers_offset;
+
+	int cp_mode;
+	int cp_running;
+
+	drm_radeon_freelist_t *head;
+	drm_radeon_freelist_t *tail;
+	int last_buf;
+	int writeback_works;
+
+	int usec_timeout;
+
+	int microcode_version;
+
+	struct {
+		u32 boxes;
+		int freelist_timeouts;
+		int freelist_loops;
+		int requested_bufs;
+		int last_frame_reads;
+		int last_clear_reads;
+		int clears;
+		int texture_uploads;
+	} stats;
+
+	int do_boxes;
+	int page_flipping;
+
+	u32 color_fmt;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	u32 depth_fmt;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+
+	u32 front_pitch_offset;
+	u32 back_pitch_offset;
+	u32 depth_pitch_offset;
+
+	drm_radeon_depth_clear_t depth_clear;
+
+	unsigned long ring_offset;
+	unsigned long ring_rptr_offset;
+	unsigned long buffers_offset;
+	unsigned long gart_textures_offset;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *cp_ring;
+	drm_local_map_t *ring_rptr;
+	drm_local_map_t *gart_textures;
+
+	struct mem_block *gart_heap;
+	struct mem_block *fb_heap;
+
+	/* SW interrupt */
+	wait_queue_head_t swi_queue;
+	atomic_t swi_emitted;
+	int vblank_crtc;
+	uint32_t irq_enable_reg;
+	uint32_t r500_disp_irq_reg;
+
+	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
+
+	unsigned long pcigart_offset;
+	unsigned int pcigart_offset_set;
+	struct drm_ati_pcigart_info gart_info;
+
+	u32 scratch_ages[5];
+
+	int have_z_offset;
+
+	/* starting from here on, data is preserved across an open */
+	uint32_t flags;		/* see radeon_chip_flags */
+	resource_size_t fb_aper_offset;
+
+	int num_gb_pipes;
+	int num_z_pipes;
+	int track_flush;
+	drm_local_map_t *mmio;
+
+	/* r6xx/r7xx pipe/shader config */
+	int r600_max_pipes;
+	int r600_max_tile_pipes;
+	int r600_max_simds;
+	int r600_max_backends;
+	int r600_max_gprs;
+	int r600_max_threads;
+	int r600_max_stack_entries;
+	int r600_max_hw_contexts;
+	int r600_max_gs_threads;
+	int r600_sx_max_export_size;
+	int r600_sx_max_export_pos_size;
+	int r600_sx_max_export_smx_size;
+	int r600_sq_num_cf_insts;
+	int r700_sx_num_of_sets;
+	int r700_sc_prim_fifo_size;
+	int r700_sc_hiz_tile_fifo_size;
+	int r700_sc_earlyz_tile_fifo_fize;
+	int r600_group_size;
+	int r600_npipes;
+	int r600_nbanks;
+
+	struct sx cs_mutex;
+	u32 cs_id_scnt;
+	u32 cs_id_wcnt;
+	/* r6xx/r7xx drm blit vertex buffer */
+	struct drm_buf *blit_vb;
+
+	/* firmware */
+	const struct firmware *me_fw, *pfp_fw;
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+	u32 age;
+} drm_radeon_buf_priv_t;
+
+struct drm_buffer;
+
+typedef struct drm_radeon_kcmd_buffer {
+	int bufsz;
+	struct drm_buffer *buffer;
+	int nbox;
+	struct drm_clip_rect __user *boxes;
+} drm_radeon_kcmd_buffer_t;
+
+extern int radeon_no_wb;
+extern struct drm_ioctl_desc radeon_ioctls[];
+extern int radeon_max_ioctl;
+#ifdef COMPAT_FREEBSD32
+extern struct drm_ioctl_desc radeon_compat_ioctls[];
+extern int radeon_num_compat_ioctls;
+#endif
+
+extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv);
+extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val);
+
+#define GET_RING_HEAD(dev_priv)	radeon_get_ring_head(dev_priv)
+#define SET_RING_HEAD(dev_priv, val) radeon_set_ring_head(dev_priv, val)
+
+/* Check whether the given hardware address is inside the framebuffer or the
+ * GART area.
+ */
+static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
+					  u64 off)
+{
+	u32 fb_start = dev_priv->fb_location;
+	u32 fb_end = fb_start + dev_priv->fb_size - 1;
+	u32 gart_start = dev_priv->gart_vm_start;
+	u32 gart_end = gart_start + dev_priv->gart_size - 1;
+
+	return ((off >= fb_start && off <= fb_end) ||
+		(off >= gart_start && off <= gart_end));
+}
+
+/* radeon_state.c */
+extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
+
+				/* radeon_cp.c */
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
+extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
+extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
+
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
+
+extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
+
+extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
+
+extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
+extern int radeon_driver_postcleanup(struct drm_device *dev);
+
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern void radeon_mem_takedown(struct mem_block **heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+			       struct mem_block *heap);
+
+extern void radeon_enable_bm(struct drm_radeon_private *dev_priv);
+extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off);
+extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val);
+
+				/* radeon_irq.c */
+extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void radeon_do_release(struct drm_device * dev);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device *dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern void radeon_enable_interrupt(struct drm_device *dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
+
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(struct drm_device *dev,
+				   struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device *dev,
+				    struct drm_file *file_priv);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device *dev,
+			      struct drm_file *file_priv);
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+				    unsigned long arg);
+
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(struct drm_device *dev);
+
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
+			     struct drm_file *file_priv,
+			     drm_radeon_kcmd_buffer_t *cmdbuf);
+
+/* r600_cp.c */
+extern int r600_do_engine_reset(struct drm_device *dev);
+extern int r600_do_cleanup_cp(struct drm_device *dev);
+extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+			   struct drm_file *file_priv);
+extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_start(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv);
+extern int r600_cp_dispatch_indirect(struct drm_device *dev,
+				     struct drm_buf *buf, int start, int end);
+extern int r600_page_table_init(struct drm_device *dev);
+extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
+extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_cp_dispatch_texture(struct drm_device *dev,
+				    struct drm_file *file_priv,
+				    drm_radeon_texture_t *tex,
+				    drm_radeon_tex_image_t *image);
+/* r600_blit.c */
+extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
+extern void r600_done_blit_copy(struct drm_device *dev);
+extern void r600_blit_copy(struct drm_device *dev,
+			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+			   int size_bytes);
+extern void r600_blit_swap(struct drm_device *dev,
+			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+			   int sx, int sy, int dx, int dy,
+			   int w, int h, int src_pitch, int dst_pitch, int cpp);
+
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
+/* Flags for stats.boxes
+ */
+#define RADEON_BOX_DMA_IDLE      0x1
+#define RADEON_BOX_RING_FULL     0x2
+#define RADEON_BOX_FLIP          0x4
+#define RADEON_BOX_WAIT_IDLE     0x8
+#define RADEON_BOX_TEXTURE_LOAD  0x10
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+#define RADEON_MM_INDEX		        0x0000
+#define RADEON_MM_DATA		        0x0004
+
+#define RADEON_AGP_COMMAND		0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG   0x0060	/* offset in PCI config */
+#	define RADEON_AGP_ENABLE	(1<<8)
+#define RADEON_AUX_SCISSOR_CNTL		0x26f0
+#	define RADEON_EXCLUSIVE_SCISSOR_0	(1 << 24)
+#	define RADEON_EXCLUSIVE_SCISSOR_1	(1 << 25)
+#	define RADEON_EXCLUSIVE_SCISSOR_2	(1 << 26)
+#	define RADEON_SCISSOR_0_ENABLE		(1 << 28)
+#	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
+#	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
+
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit.  It's handled
+ * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
+#define RADEON_BUS_CNTL			0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+#	define RADEON_BUS_MASTER_DIS		(1 << 6)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM		        (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RADEON_BUS_CNTL1		0x0034
+#	define RADEON_PMI_BM_DIS		(1 << 2)
+#	define RADEON_PMI_INT_DIS		(1 << 3)
+
+#define RV370_BUS_CNTL			0x004c
+#	define RV370_PMI_BM_DIS		        (1 << 5)
+#	define RV370_PMI_INT_DIS		(1 << 6)
+
+#define RADEON_MSI_REARM_EN		0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#	define RV370_MSI_REARM_EN		(1 << 0)
+
+#define RADEON_CLOCK_CNTL_DATA		0x000c
+#	define RADEON_PLL_WR_EN			(1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX		0x0008
+#define RADEON_CONFIG_APER_SIZE		0x0108
+#define RADEON_CONFIG_MEMSIZE		0x00f8
+#define RADEON_CRTC_OFFSET		0x0224
+#define RADEON_CRTC_OFFSET_CNTL		0x0228
+#	define RADEON_CRTC_TILE_EN		(1 << 15)
+#	define RADEON_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
+#define RADEON_CRTC2_OFFSET		0x0324
+#define RADEON_CRTC2_OFFSET_CNTL	0x0328
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+#define RS690_MC_INDEX                  0x78
+#   define RS690_MC_INDEX_MASK          0x1ff
+#   define RS690_MC_INDEX_WR_EN         (1 << 9)
+#   define RS690_MC_INDEX_WR_ACK        0x7f
+#define RS690_MC_DATA                   0x7c
+
+/* MC indirect registers */
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_K8_FB_LOCATION            0x1e
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1U << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+#define RS480_MC_MISC_UMA_CNTL          0x5f
+#define RS480_MC_MCLK_CNTL              0x7a
+#define RS480_MC_UMA_DUALCH_CNTL        0x86
+
+#define RS690_MC_FB_LOCATION            0x100
+#define RS690_MC_AGP_LOCATION           0x101
+#define RS690_MC_AGP_BASE               0x102
+#define RS690_MC_AGP_BASE_2             0x103
+
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+
+#define RS600_MC_STATUS                         0x0
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_FB_LOCATION                    0x4
+#define RS600_MC_AGP_LOCATION                   0x5
+#define RS600_AGP_BASE                          0x6
+#define RS600_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE     0x03
+#define RV515_MC_AGP_BASE_2   0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE     0x06
+#define R520_MC_AGP_BASE_2   0x07
+
+#define RADEON_MPP_TB_CONFIG		0x01c0
+#define RADEON_MEM_CNTL			0x0140
+#define RADEON_MEM_SDRAM_MODE_REG	0x0158
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RS480_AGP_BASE_2		0x0164
+#define RADEON_AGP_BASE			0x0170
+
+/* pipe config regs */
+#define R400_GB_PIPE_SELECT             0x402c
+#define RV530_GB_PIPE_SELECT2           0x4124
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1U << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_RB3D_COLOROFFSET		0x1c40
+#define RADEON_RB3D_COLORPITCH		0x1c48
+
+#define	RADEON_SRC_X_Y			0x1590
+
+#define RADEON_DP_GUI_MASTER_CNTL	0x146c
+#	define RADEON_GMC_SRC_PITCH_OFFSET_CNTL	(1 << 0)
+#	define RADEON_GMC_DST_PITCH_OFFSET_CNTL	(1 << 1)
+#	define RADEON_GMC_BRUSH_SOLID_COLOR	(13 << 4)
+#	define RADEON_GMC_BRUSH_NONE		(15 << 4)
+#	define RADEON_GMC_DST_16BPP		(4 << 8)
+#	define RADEON_GMC_DST_24BPP		(5 << 8)
+#	define RADEON_GMC_DST_32BPP		(6 << 8)
+#	define RADEON_GMC_DST_DATATYPE_SHIFT	8
+#	define RADEON_GMC_SRC_DATATYPE_COLOR	(3 << 12)
+#	define RADEON_DP_SRC_SOURCE_MEMORY	(2 << 24)
+#	define RADEON_DP_SRC_SOURCE_HOST_DATA	(3 << 24)
+#	define RADEON_GMC_CLR_CMP_CNTL_DIS	(1 << 28)
+#	define RADEON_GMC_WR_MSK_DIS		(1 << 30)
+#	define RADEON_ROP3_S			0x00cc0000
+#	define RADEON_ROP3_P			0x00f00000
+#define RADEON_DP_WRITE_MASK		0x16cc
+#define RADEON_SRC_PITCH_OFFSET		0x1428
+#define RADEON_DST_PITCH_OFFSET		0x142c
+#define RADEON_DST_PITCH_OFFSET_C	0x1c80
+#	define RADEON_DST_TILE_LINEAR		(0 << 30)
+#	define RADEON_DST_TILE_MACRO		(1 << 30)
+#	define RADEON_DST_TILE_MICRO		(2U << 30)
+#	define RADEON_DST_TILE_BOTH		(3U << 30)
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+#define RADEON_SCRATCH_UMSK		0x0770
+#define RADEON_SCRATCH_ADDR		0x0774
+
+#define RADEON_SCRATCHOFF( x )		(RADEON_SCRATCH_REG_OFFSET + 4*(x))
+
+extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
+
+#define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x)
+
+#define R600_SCRATCH_REG0		0x8500
+#define R600_SCRATCH_REG1		0x8504
+#define R600_SCRATCH_REG2		0x8508
+#define R600_SCRATCH_REG3		0x850c
+#define R600_SCRATCH_REG4		0x8510
+#define R600_SCRATCH_REG5		0x8514
+#define R600_SCRATCH_REG6		0x8518
+#define R600_SCRATCH_REG7		0x851c
+#define R600_SCRATCH_UMSK		0x8540
+#define R600_SCRATCH_ADDR		0x8544
+
+#define R600_SCRATCHOFF(x)		(R600_SCRATCH_REG_OFFSET + 4*(x))
+
+#define RADEON_GEN_INT_CNTL		0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_GUI_IDLE_INT_ENABLE	(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+
+#define RADEON_GEN_INT_STATUS		0x0044
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+#       define R500_DISPLAY_INT_STATUS          (1 << 0)
+
+#define RADEON_HOST_PATH_CNTL		0x0130
+#	define RADEON_HDP_SOFT_RESET		(1 << 26)
+#	define RADEON_HDP_WC_TIMEOUT_MASK	(7 << 28)
+#	define RADEON_HDP_WC_TIMEOUT_28BCLK	(7 << 28)
+
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RADEON_RBBM_GUICNTL		0x172c
+#	define RADEON_HOST_DATA_SWAP_NONE	(0 << 0)
+#	define RADEON_HOST_DATA_SWAP_16BIT	(1 << 0)
+#	define RADEON_HOST_DATA_SWAP_32BIT	(2 << 0)
+#	define RADEON_HOST_DATA_SWAP_HDW	(3 << 0)
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define RADEON_MC_FB_LOCATION		0x0148
+#define RADEON_MCLK_CNTL		0x0012
+#	define RADEON_FORCEON_MCLKA		(1 << 16)
+#	define RADEON_FORCEON_MCLKB		(1 << 17)
+#	define RADEON_FORCEON_YCLKA		(1 << 18)
+#	define RADEON_FORCEON_YCLKB		(1 << 19)
+#	define RADEON_FORCEON_MC		(1 << 20)
+#	define RADEON_FORCEON_AIC		(1 << 21)
+
+#define RADEON_PP_BORDER_COLOR_0	0x1d40
+#define RADEON_PP_BORDER_COLOR_1	0x1d44
+#define RADEON_PP_BORDER_COLOR_2	0x1d48
+#define RADEON_PP_CNTL			0x1c38
+#	define RADEON_SCISSOR_ENABLE		(1 <<  1)
+#define RADEON_PP_LUM_MATRIX		0x1d00
+#define RADEON_PP_MISC			0x1c14
+#define RADEON_PP_ROT_MATRIX_0		0x1d58
+#define RADEON_PP_TXFILTER_0		0x1c54
+#define RADEON_PP_TXOFFSET_0		0x1c5c
+#define RADEON_PP_TXFILTER_1		0x1c6c
+#define RADEON_PP_TXFILTER_2		0x1c84
+
+#define R300_RB2D_DSTCACHE_CTLSTAT	0x342c /* use R300_DSTCACHE_CTLSTAT */
+#define R300_DSTCACHE_CTLSTAT		0x1714
+#	define R300_RB2D_DC_FLUSH		(3 << 0)
+#	define R300_RB2D_DC_FREE		(3 << 2)
+#	define R300_RB2D_DC_FLUSH_ALL		0xf
+#	define R300_RB2D_DC_BUSY		(1U << 31)
+#define RADEON_RB3D_CNTL		0x1c3c
+#	define RADEON_ALPHA_BLEND_ENABLE	(1 << 0)
+#	define RADEON_PLANE_MASK_ENABLE		(1 << 1)
+#	define RADEON_DITHER_ENABLE		(1 << 2)
+#	define RADEON_ROUND_ENABLE		(1 << 3)
+#	define RADEON_SCALE_DITHER_ENABLE	(1 << 4)
+#	define RADEON_DITHER_INIT		(1 << 5)
+#	define RADEON_ROP_ENABLE		(1 << 6)
+#	define RADEON_STENCIL_ENABLE		(1 << 7)
+#	define RADEON_Z_ENABLE			(1 << 8)
+#	define RADEON_ZBLOCK16			(1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET		0x1c24
+#define RADEON_RB3D_DEPTHCLEARVALUE	0x3230
+#define RADEON_RB3D_DEPTHPITCH		0x1c28
+#define RADEON_RB3D_PLANEMASK		0x1d84
+#define RADEON_RB3D_STENCILREFMASK	0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE		0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT	0x3254
+#	define RADEON_RB3D_ZC_FLUSH		(1 << 0)
+#	define RADEON_RB3D_ZC_FREE		(1 << 2)
+#	define RADEON_RB3D_ZC_FLUSH_ALL		0x5
+#	define RADEON_RB3D_ZC_BUSY		(1U << 31)
+#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
+#	define R300_ZC_FLUSH		        (1 << 0)
+#	define R300_ZC_FREE		        (1 << 1)
+#	define R300_ZC_BUSY		        (1U << 31)
+#define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
+#	define RADEON_RB3D_DC_FLUSH		(3 << 0)
+#	define RADEON_RB3D_DC_FREE		(3 << 2)
+#	define RADEON_RB3D_DC_FLUSH_ALL		0xf
+#	define RADEON_RB3D_DC_BUSY		(1U << 31)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_Z_TEST_MASK		(7 << 4)
+#	define RADEON_Z_TEST_ALWAYS		(7 << 4)
+#	define RADEON_Z_HIERARCHY_ENABLE	(1 << 8)
+#	define RADEON_STENCIL_TEST_ALWAYS	(7 << 12)
+#	define RADEON_STENCIL_S_FAIL_REPLACE	(2 << 16)
+#	define RADEON_STENCIL_ZPASS_REPLACE	(2 << 20)
+#	define RADEON_STENCIL_ZFAIL_REPLACE	(2 << 24)
+#	define RADEON_Z_COMPRESSION_ENABLE	(1 << 28)
+#	define RADEON_FORCE_Z_DIRTY		(1 << 29)
+#	define RADEON_Z_WRITE_ENABLE		(1 << 30)
+#	define RADEON_Z_DECOMPRESSION_ENABLE	(1U << 31)
+#define RADEON_RBBM_SOFT_RESET		0x00f0
+#	define RADEON_SOFT_RESET_CP		(1 <<  0)
+#	define RADEON_SOFT_RESET_HI		(1 <<  1)
+#	define RADEON_SOFT_RESET_SE		(1 <<  2)
+#	define RADEON_SOFT_RESET_RE		(1 <<  3)
+#	define RADEON_SOFT_RESET_PP		(1 <<  4)
+#	define RADEON_SOFT_RESET_E2		(1 <<  5)
+#	define RADEON_SOFT_RESET_RB		(1 <<  6)
+#	define RADEON_SOFT_RESET_HDP		(1 <<  7)
+/*
+ *   6:0  Available slots in the FIFO
+ *   8    Host Interface active
+ *   9    CP request active
+ *   10   FIFO request active
+ *   11   Host Interface retry active
+ *   12   CP retry active
+ *   13   FIFO retry active
+ *   14   FIFO pipeline busy
+ *   15   Event engine busy
+ *   16   CP command stream busy
+ *   17   2D engine busy
+ *   18   2D portion of render backend busy
+ *   20   3D setup engine busy
+ *   26   GA engine busy
+ *   27   CBA 2D engine busy
+ *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ *           command stream queue not empty or Ring Buffer not empty
+ */
+#define RADEON_RBBM_STATUS		0x0e40
+/* 0x1740 is a mirror of RADEON_RBBM_STATUS (0x0e40); kept commented out for reference. */
+/* #define RADEON_RBBM_STATUS		0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
+#	define RADEON_RBBM_FIFOCNT_MASK		0x007f
+#	define RADEON_HIRQ_ON_RBB	(1 <<  8)
+#	define RADEON_CPRQ_ON_RBB	(1 <<  9)
+#	define RADEON_CFRQ_ON_RBB	(1 << 10)
+#	define RADEON_HIRQ_IN_RTBUF	(1 << 11)
+#	define RADEON_CPRQ_IN_RTBUF	(1 << 12)
+#	define RADEON_CFRQ_IN_RTBUF	(1 << 13)
+#	define RADEON_PIPE_BUSY		(1 << 14)
+#	define RADEON_ENG_EV_BUSY	(1 << 15)
+#	define RADEON_CP_CMDSTRM_BUSY	(1 << 16)
+#	define RADEON_E2_BUSY		(1 << 17)
+#	define RADEON_RB2D_BUSY		(1 << 18)
+#	define RADEON_RB3D_BUSY		(1 << 19) /* not used on r300 */
+#	define RADEON_VAP_BUSY		(1 << 20)
+#	define RADEON_RE_BUSY		(1 << 21) /* not used on r300 */
+#	define RADEON_TAM_BUSY		(1 << 22) /* not used on r300 */
+#	define RADEON_TDM_BUSY		(1 << 23) /* not used on r300 */
+#	define RADEON_PB_BUSY		(1 << 24) /* not used on r300 */
+#	define RADEON_TIM_BUSY		(1 << 25) /* not used on r300 */
+#	define RADEON_GA_BUSY		(1 << 26)
+#	define RADEON_CBA2D_BUSY	(1 << 27)
+#	define RADEON_RBBM_ACTIVE	(1U << 31)
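+
+/*
+ * Illustrative sketch, not part of the original header: waiting for the
+ * engine to go idle by polling RADEON_RBBM_STATUS until bit 31 clears.
+ * RADEON_READ() and RADEON_MAX_USEC_TIMEOUT are defined later in this
+ * file; the helper name is an assumption for illustration.
+ */
+#if 0
+static int radeon_do_wait_idle(drm_radeon_private_t *dev_priv)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_USEC_TIMEOUT; i++) {
+		u32 status = RADEON_READ(RADEON_RBBM_STATUS);
+
+		/* bit 31 covers 2D, 3D, CP, FIFO and ring activity */
+		if (!(status & RADEON_RBBM_ACTIVE))
+			return 0;
+		DRM_UDELAY(1);
+	}
+	return -EBUSY;
+}
+#endif
+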
+#define RADEON_RE_LINE_PATTERN		0x1cd0
+#define RADEON_RE_MISC			0x26c4
+#define RADEON_RE_TOP_LEFT		0x26c0
+#define RADEON_RE_WIDTH_HEIGHT		0x1c44
+#define RADEON_RE_STIPPLE_ADDR		0x1cc8
+#define RADEON_RE_STIPPLE_DATA		0x1ccc
+
+#define RADEON_SCISSOR_TL_0		0x1cd8
+#define RADEON_SCISSOR_BR_0		0x1cdc
+#define RADEON_SCISSOR_TL_1		0x1ce0
+#define RADEON_SCISSOR_BR_1		0x1ce4
+#define RADEON_SCISSOR_TL_2		0x1ce8
+#define RADEON_SCISSOR_BR_2		0x1cec
+#define RADEON_SE_COORD_FMT		0x1c50
+#define RADEON_SE_CNTL			0x1c4c
+#	define RADEON_FFACE_CULL_CW		(0 << 0)
+#	define RADEON_BFACE_SOLID		(3 << 1)
+#	define RADEON_FFACE_SOLID		(3 << 3)
+#	define RADEON_FLAT_SHADE_VTX_LAST	(3 << 6)
+#	define RADEON_DIFFUSE_SHADE_FLAT	(1 << 8)
+#	define RADEON_DIFFUSE_SHADE_GOURAUD	(2 << 8)
+#	define RADEON_ALPHA_SHADE_FLAT		(1 << 10)
+#	define RADEON_ALPHA_SHADE_GOURAUD	(2 << 10)
+#	define RADEON_SPECULAR_SHADE_FLAT	(1 << 12)
+#	define RADEON_SPECULAR_SHADE_GOURAUD	(2 << 12)
+#	define RADEON_FOG_SHADE_FLAT		(1 << 14)
+#	define RADEON_FOG_SHADE_GOURAUD		(2 << 14)
+#	define RADEON_VPORT_XY_XFORM_ENABLE	(1 << 24)
+#	define RADEON_VPORT_Z_XFORM_ENABLE	(1 << 25)
+#	define RADEON_VTX_PIX_CENTER_OGL	(1 << 27)
+#	define RADEON_ROUND_MODE_TRUNC		(0 << 28)
+#	define RADEON_ROUND_PREC_8TH_PIX	(1 << 30)
+#define RADEON_SE_CNTL_STATUS		0x2140
+#define RADEON_SE_LINE_WIDTH		0x1db8
+#define RADEON_SE_VPORT_XSCALE		0x1d98
+#define RADEON_SE_ZBIAS_FACTOR		0x1db0
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
+#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
+#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
+#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
+#define RADEON_SE_TCL_VECTOR_DATA_REG       0x2204
+#define RADEON_SE_TCL_SCALAR_INDX_REG       0x2208
+#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
+#define RADEON_SE_TCL_SCALAR_DATA_REG       0x220C
+#define RADEON_SURFACE_ACCESS_FLAGS	0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR	0x0bfc
+#define RADEON_SURFACE_CNTL		0x0b00
+#	define RADEON_SURF_TRANSLATION_DIS	(1 << 8)
+#	define RADEON_NONSURF_AP0_SWP_MASK	(3 << 20)
+#	define RADEON_NONSURF_AP0_SWP_LITTLE	(0 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG16	(1 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG32	(2 << 20)
+#	define RADEON_NONSURF_AP1_SWP_MASK	(3 << 22)
+#	define RADEON_NONSURF_AP1_SWP_LITTLE	(0 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG16	(1 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG32	(2 << 22)
+#define RADEON_SURFACE0_INFO		0x0b0c
+#	define RADEON_SURF_PITCHSEL_MASK	(0x1ff << 0)
+#	define RADEON_SURF_TILE_MODE_MASK	(3 << 16)
+#	define RADEON_SURF_TILE_MODE_MACRO	(0 << 16)
+#	define RADEON_SURF_TILE_MODE_MICRO	(1 << 16)
+#	define RADEON_SURF_TILE_MODE_32BIT_Z	(2 << 16)
+#	define RADEON_SURF_TILE_MODE_16BIT_Z	(3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND	0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND	0x0b08
+#	define RADEON_SURF_ADDRESS_FIXED_MASK	(0x3ff << 0)
+#define RADEON_SURFACE1_INFO		0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND	0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND	0x0b18
+#define RADEON_SURFACE2_INFO		0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND	0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND	0x0b28
+#define RADEON_SURFACE3_INFO		0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND	0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND	0x0b38
+#define RADEON_SURFACE4_INFO		0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND	0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND	0x0b48
+#define RADEON_SURFACE5_INFO		0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND	0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND	0x0b58
+#define RADEON_SURFACE6_INFO		0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND	0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND	0x0b68
+#define RADEON_SURFACE7_INFO		0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND	0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND	0x0b78
+#define RADEON_SW_SEMAPHORE		0x013c
+
+#define RADEON_WAIT_UNTIL		0x1720
+#	define RADEON_WAIT_CRTC_PFLIP		(1 << 0)
+#	define RADEON_WAIT_2D_IDLE		(1 << 14)
+#	define RADEON_WAIT_3D_IDLE		(1 << 15)
+#	define RADEON_WAIT_2D_IDLECLEAN		(1 << 16)
+#	define RADEON_WAIT_3D_IDLECLEAN		(1 << 17)
+#	define RADEON_WAIT_HOST_IDLECLEAN	(1 << 18)
+
+#define RADEON_RB3D_ZMASKOFFSET		0x3234
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_DEPTH_FORMAT_16BIT_INT_Z	(0 << 0)
+#	define RADEON_DEPTH_FORMAT_24BIT_INT_Z	(2 << 0)
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR		0x07d4
+#define RADEON_CP_ME_RAM_RADDR		0x07d8
+#define RADEON_CP_ME_RAM_DATAH		0x07dc
+#define RADEON_CP_ME_RAM_DATAL		0x07e0
+
+#define RADEON_CP_RB_BASE		0x0700
+#define RADEON_CP_RB_CNTL		0x0704
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#	define RADEON_RB_RPTR_WR_ENA		(1U << 31)
+#define RADEON_CP_RB_RPTR_ADDR		0x070c
+#define RADEON_CP_RB_RPTR		0x0710
+#define RADEON_CP_RB_WPTR		0x0714
+
+#define RADEON_CP_RB_WPTR_DELAY		0x0718
+#	define RADEON_PRE_WRITE_TIMER_SHIFT	0
+#	define RADEON_PRE_WRITE_LIMIT_SHIFT	23
+
+#define RADEON_CP_IB_BASE		0x0738
+
+#define RADEON_CP_CSQ_CNTL		0x0740
+#	define RADEON_CSQ_CNT_PRIMARY_MASK	(0xff << 0)
+#	define RADEON_CSQ_PRIDIS_INDDIS		(0 << 28)
+#	define RADEON_CSQ_PRIPIO_INDDIS		(1 << 28)
+#	define RADEON_CSQ_PRIBM_INDDIS		(2 << 28)
+#	define RADEON_CSQ_PRIPIO_INDBM		(3 << 28)
+#	define RADEON_CSQ_PRIBM_INDBM		(4 << 28)
+#	define RADEON_CSQ_PRIPIO_INDPIO		(15 << 28)
+
+#define R300_CP_RESYNC_ADDR		0x0778
+#define R300_CP_RESYNC_DATA		0x077c
+
+#define RADEON_AIC_CNTL			0x01d0
+#	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
+#	define RS400_MSI_REARM	                (1 << 3)
+#define RADEON_AIC_STAT			0x01d4
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_LO_ADDR		0x01dc
+#define RADEON_AIC_HI_ADDR		0x01e0
+#define RADEON_AIC_TLB_ADDR		0x01e4
+#define RADEON_AIC_TLB_DATA		0x01e8
+
+/* CP command packets */
+#define RADEON_CP_PACKET0		0x00000000
+#	define RADEON_ONE_REG_WR		(1 << 15)
+#define RADEON_CP_PACKET1		0x40000000
+#define RADEON_CP_PACKET2		0x80000000
+#define RADEON_CP_PACKET3		0xC0000000
+#       define RADEON_CP_NOP                    0x00001000
+#       define RADEON_CP_NEXT_CHAR              0x00001900
+#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
+#       define RADEON_CP_SET_SCISSORS           0x00001E00
+	     /* GEN_INDX_PRIM is unsupported starting with R300 */
+#	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
+#	define RADEON_WAIT_FOR_IDLE		0x00002600
+#	define RADEON_3D_DRAW_VBUF		0x00002800
+#	define RADEON_3D_DRAW_IMMD		0x00002900
+#	define RADEON_3D_DRAW_INDX		0x00002A00
+#       define RADEON_CP_LOAD_PALETTE           0x00002C00
+#	define RADEON_3D_LOAD_VBPNTR		0x00002F00
+#	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
+#	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
+#	define RADEON_3D_CLEAR_ZMASK		0x00003200
+#	define RADEON_CP_INDX_BUFFER		0x00003300
+#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
+#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
+#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
+#	define RADEON_3D_CLEAR_HIZ		0x00003700
+#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
+#	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
+#	define RADEON_CNTL_PAINT_MULTI		0x00009A00
+#	define RADEON_CNTL_BITBLT_MULTI		0x00009B00
+#	define RADEON_CNTL_SET_SCISSORS		0xC0001E00
+
+#       define R600_IT_INDIRECT_BUFFER_END      0x00001700
+#       define R600_IT_SET_PREDICATION          0x00002000
+#       define R600_IT_REG_RMW                  0x00002100
+#       define R600_IT_COND_EXEC                0x00002200
+#       define R600_IT_PRED_EXEC                0x00002300
+#       define R600_IT_START_3D_CMDBUF          0x00002400
+#       define R600_IT_DRAW_INDEX_2             0x00002700
+#       define R600_IT_CONTEXT_CONTROL          0x00002800
+#       define R600_IT_DRAW_INDEX_IMMD_BE       0x00002900
+#       define R600_IT_INDEX_TYPE               0x00002A00
+#       define R600_IT_DRAW_INDEX               0x00002B00
+#       define R600_IT_DRAW_INDEX_AUTO          0x00002D00
+#       define R600_IT_DRAW_INDEX_IMMD          0x00002E00
+#       define R600_IT_NUM_INSTANCES            0x00002F00
+#       define R600_IT_STRMOUT_BUFFER_UPDATE    0x00003400
+#       define R600_IT_INDIRECT_BUFFER_MP       0x00003800
+#       define R600_IT_MEM_SEMAPHORE            0x00003900
+#       define R600_IT_MPEG_INDEX               0x00003A00
+#       define R600_IT_WAIT_REG_MEM             0x00003C00
+#       define R600_IT_MEM_WRITE                0x00003D00
+#       define R600_IT_INDIRECT_BUFFER          0x00003200
+#       define R600_IT_SURFACE_SYNC             0x00004300
+#              define R600_CB0_DEST_BASE_ENA    (1 << 6)
+#              define R600_TC_ACTION_ENA        (1 << 23)
+#              define R600_VC_ACTION_ENA        (1 << 24)
+#              define R600_CB_ACTION_ENA        (1 << 25)
+#              define R600_DB_ACTION_ENA        (1 << 26)
+#              define R600_SH_ACTION_ENA        (1 << 27)
+#              define R600_SMX_ACTION_ENA       (1 << 28)
+#       define R600_IT_ME_INITIALIZE            0x00004400
+#              define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#       define R600_IT_COND_WRITE               0x00004500
+#       define R600_IT_EVENT_WRITE              0x00004600
+#       define R600_IT_EVENT_WRITE_EOP          0x00004700
+#       define R600_IT_ONE_REG_WRITE            0x00005700
+#       define R600_IT_SET_CONFIG_REG           0x00006800
+#              define R600_SET_CONFIG_REG_OFFSET 0x00008000
+#              define R600_SET_CONFIG_REG_END   0x0000ac00
+#       define R600_IT_SET_CONTEXT_REG          0x00006900
+#              define R600_SET_CONTEXT_REG_OFFSET 0x00028000
+#              define R600_SET_CONTEXT_REG_END  0x00029000
+#       define R600_IT_SET_ALU_CONST            0x00006A00
+#              define R600_SET_ALU_CONST_OFFSET 0x00030000
+#              define R600_SET_ALU_CONST_END    0x00032000
+#       define R600_IT_SET_BOOL_CONST           0x00006B00
+#              define R600_SET_BOOL_CONST_OFFSET 0x0003e380
+#              define R600_SET_BOOL_CONST_END   0x00040000
+#       define R600_IT_SET_LOOP_CONST           0x00006C00
+#              define R600_SET_LOOP_CONST_OFFSET 0x0003e200
+#              define R600_SET_LOOP_CONST_END   0x0003e380
+#       define R600_IT_SET_RESOURCE             0x00006D00
+#              define R600_SET_RESOURCE_OFFSET  0x00038000
+#              define R600_SET_RESOURCE_END     0x0003c000
+#              define R600_SQ_TEX_VTX_INVALID_TEXTURE  0x0
+#              define R600_SQ_TEX_VTX_INVALID_BUFFER   0x1
+#              define R600_SQ_TEX_VTX_VALID_TEXTURE    0x2
+#              define R600_SQ_TEX_VTX_VALID_BUFFER     0x3
+#       define R600_IT_SET_SAMPLER              0x00006E00
+#              define R600_SET_SAMPLER_OFFSET   0x0003c000
+#              define R600_SET_SAMPLER_END      0x0003cff0
+#       define R600_IT_SET_CTL_CONST            0x00006F00
+#              define R600_SET_CTL_CONST_OFFSET 0x0003cff0
+#              define R600_SET_CTL_CONST_END    0x0003e200
+#       define R600_IT_SURFACE_BASE_UPDATE      0x00007300
+
+#define RADEON_CP_PACKET_MASK		0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK	0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK	0x003ff800
+
+#define RADEON_VTX_Z_PRESENT			(1U << 31)
+#define RADEON_VTX_PKCOLOR_PRESENT		(1 << 3)
+
+#define RADEON_PRIM_TYPE_NONE			(0 << 0)
+#define RADEON_PRIM_TYPE_POINT			(1 << 0)
+#define RADEON_PRIM_TYPE_LINE			(2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP		(3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST		(4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN		(5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP		(6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2		(7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST		(8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST	(9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST		(10 << 0)
+#define RADEON_PRIM_TYPE_MASK                   0xf
+#define RADEON_PRIM_WALK_IND			(1 << 4)
+#define RADEON_PRIM_WALK_LIST			(2 << 4)
+#define RADEON_PRIM_WALK_RING			(3 << 4)
+#define RADEON_COLOR_ORDER_BGRA			(0 << 6)
+#define RADEON_COLOR_ORDER_RGBA			(1 << 6)
+#define RADEON_MAOS_ENABLE			(1 << 7)
+#define RADEON_VTX_FMT_R128_MODE		(0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE		(1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT		16
+
+#define RADEON_COLOR_FORMAT_CI8		2
+#define RADEON_COLOR_FORMAT_ARGB1555	3
+#define RADEON_COLOR_FORMAT_RGB565	4
+#define RADEON_COLOR_FORMAT_ARGB8888	6
+#define RADEON_COLOR_FORMAT_RGB332	7
+#define RADEON_COLOR_FORMAT_RGB8	9
+#define RADEON_COLOR_FORMAT_ARGB4444	15
+
+#define RADEON_TXFORMAT_I8		0
+#define RADEON_TXFORMAT_AI88		1
+#define RADEON_TXFORMAT_RGB332		2
+#define RADEON_TXFORMAT_ARGB1555	3
+#define RADEON_TXFORMAT_RGB565		4
+#define RADEON_TXFORMAT_ARGB4444	5
+#define RADEON_TXFORMAT_ARGB8888	6
+#define RADEON_TXFORMAT_RGBA8888	7
+#define RADEON_TXFORMAT_Y8		8
+#define RADEON_TXFORMAT_VYUY422         10
+#define RADEON_TXFORMAT_YVYU422         11
+#define RADEON_TXFORMAT_DXT1            12
+#define RADEON_TXFORMAT_DXT23           14
+#define RADEON_TXFORMAT_DXT45           15
+
+#define R200_PP_TXCBLEND_0                0x2f00
+#define R200_PP_TXCBLEND_1                0x2f10
+#define R200_PP_TXCBLEND_2                0x2f20
+#define R200_PP_TXCBLEND_3                0x2f30
+#define R200_PP_TXCBLEND_4                0x2f40
+#define R200_PP_TXCBLEND_5                0x2f50
+#define R200_PP_TXCBLEND_6                0x2f60
+#define R200_PP_TXCBLEND_7                0x2f70
+#define R200_SE_TCL_LIGHT_MODEL_CTL_0     0x2268
+#define R200_PP_TFACTOR_0                 0x2ee0
+#define R200_SE_VTX_FMT_0                 0x2088
+#define R200_SE_VAP_CNTL                  0x2080
+#define R200_SE_TCL_MATRIX_SEL_0          0x2230
+#define R200_SE_TCL_TEX_PROC_CTL_2        0x22a8
+#define R200_SE_TCL_UCP_VERT_BLEND_CTL    0x22c0
+#define R200_PP_TXFILTER_5                0x2ca0
+#define R200_PP_TXFILTER_4                0x2c80
+#define R200_PP_TXFILTER_3                0x2c60
+#define R200_PP_TXFILTER_2                0x2c40
+#define R200_PP_TXFILTER_1                0x2c20
+#define R200_PP_TXFILTER_0                0x2c00
+#define R200_PP_TXOFFSET_5                0x2d78
+#define R200_PP_TXOFFSET_4                0x2d60
+#define R200_PP_TXOFFSET_3                0x2d48
+#define R200_PP_TXOFFSET_2                0x2d30
+#define R200_PP_TXOFFSET_1                0x2d18
+#define R200_PP_TXOFFSET_0                0x2d00
+
+#define R200_PP_CUBIC_FACES_0             0x2c18
+#define R200_PP_CUBIC_FACES_1             0x2c38
+#define R200_PP_CUBIC_FACES_2             0x2c58
+#define R200_PP_CUBIC_FACES_3             0x2c78
+#define R200_PP_CUBIC_FACES_4             0x2c98
+#define R200_PP_CUBIC_FACES_5             0x2cb8
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
+#define R200_SE_VTE_CNTL                  0x20b0
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL   0x2250
+#define R200_PP_TAM_DEBUG3                0x2d9c
+#define R200_PP_CNTL_X                    0x2cc4
+#define R200_SE_VAP_CNTL_STATUS           0x2140
+#define R200_RE_SCISSOR_TL_0              0x1cd8
+#define R200_RE_SCISSOR_TL_1              0x1ce0
+#define R200_RE_SCISSOR_TL_2              0x1ce8
+#define R200_RB3D_DEPTHXY_OFFSET          0x1d60
+#define R200_SE_VTX_STATE_CNTL            0x2180
+#define R200_RE_POINTSIZE                 0x2648
+#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04	/* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0	/* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+
+#define RADEON_SE_TCL_STATE_FLUSH           0x2284
+
+#define SE_VAP_CNTL__TCL_ENA_MASK                          0x00000001
+#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK                   0x00010000
+#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT                 0x00000012
+#define SE_VTE_CNTL__VTX_XY_FMT_MASK                       0x00000100
+#define SE_VTE_CNTL__VTX_Z_FMT_MASK                        0x00000200
+#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK                  0x00000001
+#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK                  0x00000002
+#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT               0x0000000b
+#define R200_3D_DRAW_IMMD_2      0xC0003500
+#define R200_SE_VTX_FMT_1                 0x208c
+#define R200_RE_CNTL                      0x1c50
+
+#define R200_RB3D_BLENDCOLOR              0x3218
+
+#define R200_SE_TCL_POINT_SPRITE_CNTL     0x22c4
+
+#define R200_PP_TRI_PERF 0x2cf8
+
+#define R200_PP_AFS_0                     0x2f80
+#define R200_PP_AFS_1                     0x2f00	/* same as txcblend_0 */
+
+#define R200_VAP_PVS_CNTL_1               0x22D0
+
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
+#define R500_D1CRTC_STATUS 0x609c
+#define R500_D2CRTC_STATUS 0x689c
+#define R500_CRTC_V_BLANK (1 << 0)
+
+#define R500_D1CRTC_FRAME_COUNT 0x60a4
+#define R500_D2CRTC_FRAME_COUNT 0x68a4
+
+#define R500_D1MODE_V_COUNTER 0x6530
+#define R500_D2MODE_V_COUNTER 0x6d30
+
+#define R500_D1MODE_VBLANK_STATUS 0x6534
+#define R500_D2MODE_VBLANK_STATUS 0x6d34
+#define R500_VBLANK_OCCURED (1 << 0)
+#define R500_VBLANK_ACK     (1 << 4)
+#define R500_VBLANK_STAT    (1 << 12)
+#define R500_VBLANK_INT     (1 << 16)
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1 << 0)
+#define R500_D2MODE_INT_MASK (1 << 8)
+
+#define R500_DISP_INTERRUPT_STATUS 0x7edc
+#define R500_D1_VBLANK_INTERRUPT (1 << 4)
+#define R500_D2_VBLANK_INTERRUPT (1 << 5)
+
+/* R6xx/R7xx registers */
+#define R600_MC_VM_FB_LOCATION                                 0x2180
+#define R600_MC_VM_AGP_TOP                                     0x2184
+#define R600_MC_VM_AGP_BOT                                     0x2188
+#define R600_MC_VM_AGP_BASE                                    0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR                    0x2190
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR                   0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR                0x2198
+
+#define R700_MC_VM_FB_LOCATION                                 0x2024
+#define R700_MC_VM_AGP_TOP                                     0x2028
+#define R700_MC_VM_AGP_BOT                                     0x202c
+#define R700_MC_VM_AGP_BASE                                    0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR                    0x2034
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR                   0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR                0x203c
+
+#define R600_MCD_RD_A_CNTL                                     0x219c
+#define R600_MCD_RD_B_CNTL                                     0x21a0
+
+#define R600_MCD_WR_A_CNTL                                     0x21a4
+#define R600_MCD_WR_B_CNTL                                     0x21a8
+
+#define R600_MCD_RD_SYS_CNTL                                   0x2200
+#define R600_MCD_WR_SYS_CNTL                                   0x2214
+
+#define R600_MCD_RD_GFX_CNTL                                   0x21fc
+#define R600_MCD_RD_HDP_CNTL                                   0x2204
+#define R600_MCD_RD_PDMA_CNTL                                  0x2208
+#define R600_MCD_RD_SEM_CNTL                                   0x220c
+#define R600_MCD_WR_GFX_CNTL                                   0x2210
+#define R600_MCD_WR_HDP_CNTL                                   0x2218
+#define R600_MCD_WR_PDMA_CNTL                                  0x221c
+#define R600_MCD_WR_SEM_CNTL                                   0x2220
+
+#       define R600_MCD_L1_TLB                                 (1 << 0)
+#       define R600_MCD_L1_FRAG_PROC                           (1 << 1)
+#       define R600_MCD_L1_STRICT_ORDERING                     (1 << 2)
+
+#       define R600_MCD_SYSTEM_ACCESS_MODE_MASK                (3 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 6)
+
+#       define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU    (0 << 8)
+#       define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
+
+#       define R600_MCD_SEMAPHORE_MODE                         (1 << 10)
+#       define R600_MCD_WAIT_L2_QUERY                          (1 << 11)
+#       define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x)               ((x) << 12)
+#       define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x)             ((x) << 15)
+
+#define R700_MC_VM_MD_L1_TLB0_CNTL                             0x2654
+#define R700_MC_VM_MD_L1_TLB1_CNTL                             0x2658
+#define R700_MC_VM_MD_L1_TLB2_CNTL                             0x265c
+
+#define R700_MC_VM_MB_L1_TLB0_CNTL                             0x2234
+#define R700_MC_VM_MB_L1_TLB1_CNTL                             0x2238
+#define R700_MC_VM_MB_L1_TLB2_CNTL                             0x223c
+#define R700_MC_VM_MB_L1_TLB3_CNTL                             0x2240
+
+#       define R700_ENABLE_L1_TLB                              (1 << 0)
+#       define R700_ENABLE_L1_FRAGMENT_PROCESSING              (1 << 1)
+#       define R700_SYSTEM_ACCESS_MODE_IN_SYS                  (2 << 3)
+#       define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU  (0 << 5)
+#       define R700_EFFECTIVE_L1_TLB_SIZE(x)                   ((x) << 15)
+#       define R700_EFFECTIVE_L1_QUEUE_SIZE(x)                 ((x) << 18)
+
+#define R700_MC_ARB_RAMCFG                                     0x2760
+#       define R700_NOOFBANK_SHIFT                             0
+#       define R700_NOOFBANK_MASK                              0x3
+#       define R700_NOOFRANK_SHIFT                             2
+#       define R700_NOOFRANK_MASK                              0x1
+#       define R700_NOOFROWS_SHIFT                             3
+#       define R700_NOOFROWS_MASK                              0x7
+#       define R700_NOOFCOLS_SHIFT                             6
+#       define R700_NOOFCOLS_MASK                              0x3
+#       define R700_CHANSIZE_SHIFT                             8
+#       define R700_CHANSIZE_MASK                              0x1
+#       define R700_BURSTLENGTH_SHIFT                          9
+#       define R700_BURSTLENGTH_MASK                           0x1
+#define R600_RAMCFG                                            0x2408
+#       define R600_NOOFBANK_SHIFT                             0
+#       define R600_NOOFBANK_MASK                              0x1
+#       define R600_NOOFRANK_SHIFT                             1
+#       define R600_NOOFRANK_MASK                              0x1
+#       define R600_NOOFROWS_SHIFT                             2
+#       define R600_NOOFROWS_MASK                              0x7
+#       define R600_NOOFCOLS_SHIFT                             5
+#       define R600_NOOFCOLS_MASK                              0x3
+#       define R600_CHANSIZE_SHIFT                             7
+#       define R600_CHANSIZE_MASK                              0x1
+#       define R600_BURSTLENGTH_SHIFT                          8
+#       define R600_BURSTLENGTH_MASK                           0x1
+
+#define R600_VM_L2_CNTL                                        0x1400
+#       define R600_VM_L2_CACHE_EN                             (1 << 0)
+#       define R600_VM_L2_FRAG_PROC                            (1 << 1)
+#       define R600_VM_ENABLE_PTE_CACHE_LRU_W                  (1 << 9)
+#       define R600_VM_L2_CNTL_QUEUE_SIZE(x)                   ((x) << 13)
+#       define R700_VM_L2_CNTL_QUEUE_SIZE(x)                   ((x) << 14)
+
+#define R600_VM_L2_CNTL2                                       0x1404
+#       define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS         (1 << 0)
+#       define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE            (1 << 1)
+#define R600_VM_L2_CNTL3                                       0x1408
+#       define R600_VM_L2_CNTL3_BANK_SELECT_0(x)               ((x) << 0)
+#       define R600_VM_L2_CNTL3_BANK_SELECT_1(x)               ((x) << 5)
+#       define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x)           ((x) << 10)
+#       define R700_VM_L2_CNTL3_BANK_SELECT(x)                 ((x) << 0)
+#       define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x)           ((x) << 6)
+
+#define R600_VM_L2_STATUS                                      0x140c
+
+#define R600_VM_CONTEXT0_CNTL                                  0x1410
+#       define R600_VM_ENABLE_CONTEXT                          (1 << 0)
+#       define R600_VM_PAGE_TABLE_DEPTH_FLAT                   (0 << 1)
+
+#define R600_VM_CONTEXT0_CNTL2                                 0x1430
+#define R600_VM_CONTEXT0_REQUEST_RESPONSE                      0x1470
+#define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR                 0x1490
+#define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR                0x14b0
+#define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                  0x1574
+#define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR                 0x1594
+#define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR                   0x15b4
+
+#define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                  0x153c
+#define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR                 0x155c
+#define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR                   0x157c
+
+#define R600_HDP_HOST_PATH_CNTL                                0x2c00
+
+#define R600_GRBM_CNTL                                         0x8000
+#       define R600_GRBM_READ_TIMEOUT(x)                       ((x) << 0)
+
+#define R600_GRBM_STATUS                                       0x8010
+#       define R600_CMDFIFO_AVAIL_MASK                         0x1f
+#       define R700_CMDFIFO_AVAIL_MASK                         0xf
+#       define R600_GUI_ACTIVE                                 (1U << 31)
+#define R600_GRBM_STATUS2                                      0x8014
+#define R600_GRBM_SOFT_RESET                                   0x8020
+#       define R600_SOFT_RESET_CP                              (1 << 0)
+#define R600_WAIT_UNTIL                                        0x8040
+
+#define R600_CP_SEM_WAIT_TIMER                                 0x85bc
+#define R600_CP_ME_CNTL                                        0x86d8
+#       define R600_CP_ME_HALT                                 (1 << 28)
+#define R600_CP_QUEUE_THRESHOLDS                               0x8760
+#       define R600_ROQ_IB1_START(x)                           ((x) << 0)
+#       define R600_ROQ_IB2_START(x)                           ((x) << 8)
+#define R600_CP_MEQ_THRESHOLDS                                 0x8764
+#       define R700_STQ_SPLIT(x)                               ((x) << 0)
+#       define R600_MEQ_END(x)                                 ((x) << 16)
+#       define R600_ROQ_END(x)                                 ((x) << 24)
+#define R600_CP_PERFMON_CNTL                                   0x87fc
+#define R600_CP_RB_BASE                                        0xc100
+#define R600_CP_RB_CNTL                                        0xc104
+#       define R600_RB_BUFSZ(x)                                ((x) << 0)
+#       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#	define R600_BUF_SWAP_32BIT		               (2 << 16)
+#       define R600_RB_NO_UPDATE                               (1 << 27)
+#       define R600_RB_RPTR_WR_ENA                             (1U << 31)
+#define R600_CP_RB_RPTR_WR                                     0xc108
+#define R600_CP_RB_RPTR_ADDR                                   0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI                                0xc110
+#define R600_CP_RB_WPTR                                        0xc114
+#define R600_CP_RB_WPTR_ADDR                                   0xc118
+#define R600_CP_RB_WPTR_ADDR_HI                                0xc11c
+#define R600_CP_RB_RPTR                                        0x8700
+#define R600_CP_RB_WPTR_DELAY                                  0x8704
+#define R600_CP_PFP_UCODE_ADDR                                 0xc150
+#define R600_CP_PFP_UCODE_DATA                                 0xc154
+#define R600_CP_ME_RAM_RADDR                                   0xc158
+#define R600_CP_ME_RAM_WADDR                                   0xc15c
+#define R600_CP_ME_RAM_DATA                                    0xc160
+#define R600_CP_DEBUG                                          0xc1fc
+
+#define R600_PA_CL_ENHANCE                                     0x8a14
+#       define R600_CLIP_VTX_REORDER_ENA                       (1 << 0)
+#       define R600_NUM_CLIP_SEQ(x)                            ((x) << 1)
+#define R600_PA_SC_LINE_STIPPLE_STATE                          0x8b10
+#define R600_PA_SC_MULTI_CHIP_CNTL                             0x8b20
+#define R700_PA_SC_FORCE_EOV_MAX_CNTS                          0x8b24
+#       define R700_FORCE_EOV_MAX_CLK_CNT(x)                   ((x) << 0)
+#       define R700_FORCE_EOV_MAX_REZ_CNT(x)                   ((x) << 16)
+#define R600_PA_SC_AA_SAMPLE_LOCS_2S                           0x8b40
+#define R600_PA_SC_AA_SAMPLE_LOCS_4S                           0x8b44
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0                       0x8b48
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1                       0x8b4c
+#       define R600_S0_X(x)                                    ((x) << 0)
+#       define R600_S0_Y(x)                                    ((x) << 4)
+#       define R600_S1_X(x)                                    ((x) << 8)
+#       define R600_S1_Y(x)                                    ((x) << 12)
+#       define R600_S2_X(x)                                    ((x) << 16)
+#       define R600_S2_Y(x)                                    ((x) << 20)
+#       define R600_S3_X(x)                                    ((x) << 24)
+#       define R600_S3_Y(x)                                    ((x) << 28)
+#       define R600_S4_X(x)                                    ((x) << 0)
+#       define R600_S4_Y(x)                                    ((x) << 4)
+#       define R600_S5_X(x)                                    ((x) << 8)
+#       define R600_S5_Y(x)                                    ((x) << 12)
+#       define R600_S6_X(x)                                    ((x) << 16)
+#       define R600_S6_Y(x)                                    ((x) << 20)
+#       define R600_S7_X(x)                                    ((x) << 24)
+#       define R600_S7_Y(x)                                    ((x) << 28)
+#define R600_PA_SC_FIFO_SIZE                                   0x8bd0
+#       define R600_SC_PRIM_FIFO_SIZE(x)                       ((x) << 0)
+#       define R600_SC_HIZ_TILE_FIFO_SIZE(x)                   ((x) << 8)
+#       define R600_SC_EARLYZ_TILE_FIFO_SIZE(x)                ((x) << 16)
+#define R700_PA_SC_FIFO_SIZE_R7XX                              0x8bcc
+#       define R700_SC_PRIM_FIFO_SIZE(x)                       ((x) << 0)
+#       define R700_SC_HIZ_TILE_FIFO_SIZE(x)                   ((x) << 12)
+#       define R700_SC_EARLYZ_TILE_FIFO_SIZE(x)                ((x) << 20)
+#define R600_PA_SC_ENHANCE                                     0x8bf0
+#       define R600_FORCE_EOV_MAX_CLK_CNT(x)                   ((x) << 0)
+#       define R600_FORCE_EOV_MAX_TILE_CNT(x)                  ((x) << 12)
+#define R600_PA_SC_CLIPRECT_RULE                               0x2820c
+#define R700_PA_SC_EDGERULE                                    0x28230
+#define R600_PA_SC_LINE_STIPPLE                                0x28a0c
+#define R600_PA_SC_MODE_CNTL                                   0x28a4c
+#define R600_PA_SC_AA_CONFIG                                   0x28c04
+
+#define R600_SX_EXPORT_BUFFER_SIZES                            0x900c
+#       define R600_COLOR_BUFFER_SIZE(x)                       ((x) << 0)
+#       define R600_POSITION_BUFFER_SIZE(x)                    ((x) << 8)
+#       define R600_SMX_BUFFER_SIZE(x)                         ((x) << 16)
+#define R600_SX_DEBUG_1                                        0x9054
+#       define R600_SMX_EVENT_RELEASE                          (1 << 0)
+#       define R600_ENABLE_NEW_SMX_ADDRESS                     (1 << 16)
+#define R700_SX_DEBUG_1                                        0x9058
+#       define R700_ENABLE_NEW_SMX_ADDRESS                     (1 << 16)
+#define R600_SX_MISC                                           0x28350
+
+#define R600_DB_DEBUG                                          0x9830
+#       define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE              (1U << 31)
+#define R600_DB_WATERMARKS                                     0x9838
+#       define R600_DEPTH_FREE(x)                              ((x) << 0)
+#       define R600_DEPTH_FLUSH(x)                             ((x) << 5)
+#       define R600_DEPTH_PENDING_FREE(x)                      ((x) << 15)
+#       define R600_DEPTH_CACHELINE_FREE(x)                    ((x) << 20)
+#define R700_DB_DEBUG3                                         0x98b0
+#       define R700_DB_CLK_OFF_DELAY(x)                        ((x) << 11)
+#define RV700_DB_DEBUG4                                        0x9b8c
+#       define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER          (1 << 6)
+
+#define R600_VGT_CACHE_INVALIDATION                            0x88c4
+#       define R600_CACHE_INVALIDATION(x)                      ((x) << 0)
+#       define R600_VC_ONLY                                    0
+#       define R600_TC_ONLY                                    1
+#       define R600_VC_AND_TC                                  2
+#       define R700_AUTO_INVLD_EN(x)                           ((x) << 6)
+#       define R700_NO_AUTO                                    0
+#       define R700_ES_AUTO                                    1
+#       define R700_GS_AUTO                                    2
+#       define R700_ES_AND_GS_AUTO                             3
+#define R600_VGT_GS_PER_ES                                     0x88c8
+#define R600_VGT_ES_PER_GS                                     0x88cc
+#define R600_VGT_GS_PER_VS                                     0x88e8
+#define R600_VGT_GS_VERTEX_REUSE                               0x88d4
+#define R600_VGT_NUM_INSTANCES                                 0x8974
+#define R600_VGT_STRMOUT_EN                                    0x28ab0
+#define R600_VGT_EVENT_INITIATOR                               0x28a90
+#       define R600_CACHE_FLUSH_AND_INV_EVENT                  (0x16 << 0)
+#define R600_VGT_VERTEX_REUSE_BLOCK_CNTL                       0x28c58
+#       define R600_VTX_REUSE_DEPTH_MASK                       0xff
+#define R600_VGT_OUT_DEALLOC_CNTL                              0x28c5c
+#       define R600_DEALLOC_DIST_MASK                          0x7f
+
+#define R600_CB_COLOR0_BASE                                    0x28040
+#define R600_CB_COLOR1_BASE                                    0x28044
+#define R600_CB_COLOR2_BASE                                    0x28048
+#define R600_CB_COLOR3_BASE                                    0x2804c
+#define R600_CB_COLOR4_BASE                                    0x28050
+#define R600_CB_COLOR5_BASE                                    0x28054
+#define R600_CB_COLOR6_BASE                                    0x28058
+#define R600_CB_COLOR7_BASE                                    0x2805c
+#define R600_CB_COLOR7_FRAG                                    0x280fc
+
+#define R600_CB_COLOR0_SIZE                                    0x28060
+#define R600_CB_COLOR0_VIEW                                    0x28080
+#define R600_CB_COLOR0_INFO                                    0x280a0
+#define R600_CB_COLOR0_TILE                                    0x280c0
+#define R600_CB_COLOR0_FRAG                                    0x280e0
+#define R600_CB_COLOR0_MASK                                    0x28100
+
+#define AVIVO_D1MODE_VLINE_START_END                           0x6538
+#define AVIVO_D2MODE_VLINE_START_END                           0x6d38
+#define R600_CP_COHER_BASE                                     0x85f8
+#define R600_DB_DEPTH_BASE                                     0x2800c
+#define R600_SQ_PGM_START_FS                                   0x28894
+#define R600_SQ_PGM_START_ES                                   0x28880
+#define R600_SQ_PGM_START_VS                                   0x28858
+#define R600_SQ_PGM_RESOURCES_VS                               0x28868
+#define R600_SQ_PGM_CF_OFFSET_VS                               0x288d0
+#define R600_SQ_PGM_START_GS                                   0x2886c
+#define R600_SQ_PGM_START_PS                                   0x28840
+#define R600_SQ_PGM_RESOURCES_PS                               0x28850
+#define R600_SQ_PGM_EXPORTS_PS                                 0x28854
+#define R600_SQ_PGM_CF_OFFSET_PS                               0x288cc
+#define R600_VGT_DMA_BASE                                      0x287e8
+#define R600_VGT_DMA_BASE_HI                                   0x287e4
+#define R600_VGT_STRMOUT_BASE_OFFSET_0                         0x28b10
+#define R600_VGT_STRMOUT_BASE_OFFSET_1                         0x28b14
+#define R600_VGT_STRMOUT_BASE_OFFSET_2                         0x28b18
+#define R600_VGT_STRMOUT_BASE_OFFSET_3                         0x28b1c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0                      0x28b44
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1                      0x28b48
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2                      0x28b4c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3                      0x28b50
+#define R600_VGT_STRMOUT_BUFFER_BASE_0                         0x28ad8
+#define R600_VGT_STRMOUT_BUFFER_BASE_1                         0x28ae8
+#define R600_VGT_STRMOUT_BUFFER_BASE_2                         0x28af8
+#define R600_VGT_STRMOUT_BUFFER_BASE_3                         0x28b08
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_0                       0x28adc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_1                       0x28aec
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_2                       0x28afc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_3                       0x28b0c
+
+#define R600_VGT_PRIMITIVE_TYPE                                0x8958
+
+#define R600_PA_SC_SCREEN_SCISSOR_TL                           0x28030
+#define R600_PA_SC_GENERIC_SCISSOR_TL                          0x28240
+#define R600_PA_SC_WINDOW_SCISSOR_TL                           0x28204
+
+#define R600_TC_CNTL                                           0x9608
+#       define R600_TC_L2_SIZE(x)                              ((x) << 5)
+#       define R600_L2_DISABLE_LATE_HIT                        (1 << 9)
+
+#define R600_ARB_POP                                           0x2418
+#       define R600_ENABLE_TC128                               (1 << 30)
+#define R600_ARB_GDEC_RD_CNTL                                  0x246c
+
+#define R600_TA_CNTL_AUX                                       0x9508
+#       define R600_DISABLE_CUBE_WRAP                          (1 << 0)
+#       define R600_DISABLE_CUBE_ANISO                         (1 << 1)
+#       define R700_GETLOD_SELECT(x)                           ((x) << 2)
+#       define R600_SYNC_GRADIENT                              (1 << 24)
+#       define R600_SYNC_WALKER                                (1 << 25)
+#       define R600_SYNC_ALIGNER                               (1 << 26)
+#       define R600_BILINEAR_PRECISION_6_BIT                   (0 << 31)
+#       define R600_BILINEAR_PRECISION_8_BIT                   (1U << 31)
+
+#define R700_TCP_CNTL                                          0x9610
+
+#define R600_SMX_DC_CTL0                                       0xa020
+#       define R700_USE_HASH_FUNCTION                          (1 << 0)
+#       define R700_CACHE_DEPTH(x)                             ((x) << 1)
+#       define R700_FLUSH_ALL_ON_EVENT                         (1 << 10)
+#       define R700_STALL_ON_EVENT                             (1 << 11)
+#define R700_SMX_EVENT_CTL                                     0xa02c
+#       define R700_ES_FLUSH_CTL(x)                            ((x) << 0)
+#       define R700_GS_FLUSH_CTL(x)                            ((x) << 3)
+#       define R700_ACK_FLUSH_CTL(x)                           ((x) << 6)
+#       define R700_SYNC_FLUSH_CTL                             (1 << 8)
+
+#define R600_SQ_CONFIG                                         0x8c00
+#       define R600_VC_ENABLE                                  (1 << 0)
+#       define R600_EXPORT_SRC_C                               (1 << 1)
+#       define R600_DX9_CONSTS                                 (1 << 2)
+#       define R600_ALU_INST_PREFER_VECTOR                     (1 << 3)
+#       define R600_DX10_CLAMP                                 (1 << 4)
+#       define R600_CLAUSE_SEQ_PRIO(x)                         ((x) << 8)
+#       define R600_PS_PRIO(x)                                 ((x) << 24)
+#       define R600_VS_PRIO(x)                                 ((x) << 26)
+#       define R600_GS_PRIO(x)                                 ((x) << 28)
+#       define R600_ES_PRIO(x)                                 ((x) << 30)
+#define R600_SQ_GPR_RESOURCE_MGMT_1                            0x8c04
+#       define R600_NUM_PS_GPRS(x)                             ((x) << 0)
+#       define R600_NUM_VS_GPRS(x)                             ((x) << 16)
+#       define R700_DYN_GPR_ENABLE                             (1 << 27)
+#       define R600_NUM_CLAUSE_TEMP_GPRS(x)                    ((x) << 28)
+#define R600_SQ_GPR_RESOURCE_MGMT_2                            0x8c08
+#       define R600_NUM_GS_GPRS(x)                             ((x) << 0)
+#       define R600_NUM_ES_GPRS(x)                             ((x) << 16)
+#define R600_SQ_THREAD_RESOURCE_MGMT                           0x8c0c
+#       define R600_NUM_PS_THREADS(x)                          ((x) << 0)
+#       define R600_NUM_VS_THREADS(x)                          ((x) << 8)
+#       define R600_NUM_GS_THREADS(x)                          ((x) << 16)
+#       define R600_NUM_ES_THREADS(x)                          ((x) << 24)
+#define R600_SQ_STACK_RESOURCE_MGMT_1                          0x8c10
+#       define R600_NUM_PS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define R600_NUM_VS_STACK_ENTRIES(x)                    ((x) << 16)
+#define R600_SQ_STACK_RESOURCE_MGMT_2                          0x8c14
+#       define R600_NUM_GS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define R600_NUM_ES_STACK_ENTRIES(x)                    ((x) << 16)
+#define R600_SQ_MS_FIFO_SIZES                                  0x8cf0
+#       define R600_CACHE_FIFO_SIZE(x)                         ((x) << 0)
+#       define R600_FETCH_FIFO_HIWATER(x)                      ((x) << 8)
+#       define R600_DONE_FIFO_HIWATER(x)                       ((x) << 16)
+#       define R600_ALU_UPDATE_FIFO_HIWATER(x)                 ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0                         0x8db0
+#       define R700_SIMDA_RING0(x)                             ((x) << 0)
+#       define R700_SIMDA_RING1(x)                             ((x) << 8)
+#       define R700_SIMDB_RING0(x)                             ((x) << 16)
+#       define R700_SIMDB_RING1(x)                             ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1                         0x8db4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2                         0x8db8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3                         0x8dbc
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4                         0x8dc0
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5                         0x8dc4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6                         0x8dc8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7                         0x8dcc
+
+#define R600_SPI_PS_IN_CONTROL_0                               0x286cc
+#       define R600_NUM_INTERP(x)                              ((x) << 0)
+#       define R600_POSITION_ENA                               (1 << 8)
+#       define R600_POSITION_CENTROID                          (1 << 9)
+#       define R600_POSITION_ADDR(x)                           ((x) << 10)
+#       define R600_PARAM_GEN(x)                               ((x) << 15)
+#       define R600_PARAM_GEN_ADDR(x)                          ((x) << 19)
+#       define R600_BARYC_SAMPLE_CNTL(x)                       ((x) << 26)
+#       define R600_PERSP_GRADIENT_ENA                         (1 << 28)
+#       define R600_LINEAR_GRADIENT_ENA                        (1 << 29)
+#       define R600_POSITION_SAMPLE                            (1 << 30)
+#       define R600_BARYC_AT_SAMPLE_ENA                        (1U << 31)
+#define R600_SPI_PS_IN_CONTROL_1                               0x286d0
+#       define R600_GEN_INDEX_PIX                              (1 << 0)
+#       define R600_GEN_INDEX_PIX_ADDR(x)                      ((x) << 1)
+#       define R600_FRONT_FACE_ENA                             (1 << 8)
+#       define R600_FRONT_FACE_CHAN(x)                         ((x) << 9)
+#       define R600_FRONT_FACE_ALL_BITS                        (1 << 11)
+#       define R600_FRONT_FACE_ADDR(x)                         ((x) << 12)
+#       define R600_FOG_ADDR(x)                                ((x) << 17)
+#       define R600_FIXED_PT_POSITION_ENA                      (1 << 24)
+#       define R600_FIXED_PT_POSITION_ADDR(x)                  ((x) << 25)
+#       define R700_POSITION_ULC                               (1 << 30)
+#define R600_SPI_INPUT_Z                                       0x286d8
+
+#define R600_SPI_CONFIG_CNTL                                   0x9100
+#       define R600_GPR_WRITE_PRIORITY(x)                      ((x) << 0)
+#       define R600_DISABLE_INTERP_1                           (1 << 5)
+#define R600_SPI_CONFIG_CNTL_1                                 0x913c
+#       define R600_VTX_DONE_DELAY(x)                          ((x) << 0)
+#       define R600_INTERP_ONE_PRIM_PER_ROW                    (1 << 4)
+
+#define R600_GB_TILING_CONFIG                                  0x98f0
+#       define R600_PIPE_TILING(x)                             ((x) << 1)
+#       define R600_BANK_TILING(x)                             ((x) << 4)
+#       define R600_GROUP_SIZE(x)                              ((x) << 6)
+#       define R600_ROW_TILING(x)                              ((x) << 8)
+#       define R600_BANK_SWAPS(x)                              ((x) << 11)
+#       define R600_SAMPLE_SPLIT(x)                            ((x) << 14)
+#       define R600_BACKEND_MAP(x)                             ((x) << 16)
+#define R600_DCP_TILING_CONFIG                                 0x6ca0
+#define R600_HDP_TILING_CONFIG                                 0x2f3c
+
+#define R600_CC_RB_BACKEND_DISABLE                             0x98f4
+#define R700_CC_SYS_RB_BACKEND_DISABLE                         0x3f88
+#       define R600_BACKEND_DISABLE(x)                         ((x) << 16)
+
+#define R600_CC_GC_SHADER_PIPE_CONFIG                          0x8950
+#define R600_GC_USER_SHADER_PIPE_CONFIG                        0x8954
+#       define R600_INACTIVE_QD_PIPES(x)                       ((x) << 8)
+#       define R600_INACTIVE_QD_PIPES_MASK                     (0xff << 8)
+#       define R600_INACTIVE_SIMDS(x)                          ((x) << 16)
+#       define R600_INACTIVE_SIMDS_MASK                        (0xff << 16)
+
+#define R700_CGTS_SYS_TCC_DISABLE                              0x3f90
+#define R700_CGTS_USER_SYS_TCC_DISABLE                         0x3f94
+#define R700_CGTS_TCC_DISABLE                                  0x9148
+#define R700_CGTS_USER_TCC_DISABLE                             0x914c
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+
+#define RADEON_LAST_FRAME_REG		RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG	RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG		RADEON_SCRATCH_REG2
+#define RADEON_LAST_SWI_REG		RADEON_SCRATCH_REG3
+#define RADEON_LAST_DISPATCH		1
+
+#define R600_LAST_FRAME_REG		R600_SCRATCH_REG0
+#define R600_LAST_DISPATCH_REG	        R600_SCRATCH_REG1
+#define R600_LAST_CLEAR_REG		R600_SCRATCH_REG2
+#define R600_LAST_SWI_REG		R600_SCRATCH_REG3
+
+#define RADEON_MAX_VB_AGE		0x7fffffff
+#define RADEON_MAX_VB_VERTS		(0xffff)
+
+#define RADEON_RING_HIGH_MARK		128
+
+#define RADEON_PCIGART_TABLE_SIZE      (32*1024)
+
+#define RADEON_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE(reg, val)                                          \
+do {									\
+	if (reg < 0x10000) {				                \
+		DRM_WRITE32(dev_priv->mmio, (reg), (val));		\
+	} else {                                                        \
+		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg));	\
+		DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val));	\
+	}                                                               \
+} while (0)
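
Registers below 0x10000 sit inside the directly mapped MMIO aperture and take a single 32-bit store; higher offsets are reached through the RADEON_MM_INDEX/RADEON_MM_DATA pair. A minimal sketch of what the macro expands to for a high offset, assuming a dev_priv in scope (v is a placeholder value):

	/* RADEON_WRITE(R600_SPI_PS_IN_CONTROL_1, v): 0x286d0 >= 0x10000,
	 * so the write goes through the index/data pair instead of a
	 * direct store into the mapped BAR. */
	DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, R600_SPI_PS_IN_CONTROL_1);
	DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, v);
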
+#define RADEON_READ8(reg)	DRM_READ8(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE8(reg,val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )
+
+#define RADEON_WRITE_PLL(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
+		       ((addr) & 0x1f) | RADEON_PLL_WR_EN );		\
+	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
+} while (0)
+
+#define RADEON_WRITE_PCIE(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_PCIE_INDEX,				\
+			((addr) & 0xff));				\
+	RADEON_WRITE(RADEON_PCIE_DATA, (val));			\
+} while (0)
+
+#define R500_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
+	RADEON_WRITE(R520_MC_IND_DATA, (val));			\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);	\
+} while (0)
+
+#define RS480_WRITE_MCIND(addr, val)				\
+do {									\
+	RADEON_WRITE(RS480_NB_MC_INDEX,				\
+			((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);	\
+	RADEON_WRITE(RS480_NB_MC_DATA, (val));			\
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);			\
+} while (0)
+
+#define RS690_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));	\
+	RADEON_WRITE(RS690_MC_DATA, val);			\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);	\
+} while (0)
+
+#define RS600_WRITE_MCIND(addr, val)				\
+do {							        \
+	RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK)); \
+	RADEON_WRITE(RS600_MC_DATA, val);                       \
+} while (0)
+
+#define IGP_WRITE_MCIND(addr, val)				\
+do {									\
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||   \
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))      \
+		RS690_WRITE_MCIND(addr, val);				\
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)  \
+		RS600_WRITE_MCIND(addr, val);				\
+	else								\
+		RS480_WRITE_MCIND(addr, val);				\
+} while (0)
+
+#define CP_PACKET0( reg, n )						\
+	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE( reg, n )					\
+	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1( reg0, reg1 )					\
+	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2()							\
+	(RADEON_CP_PACKET2)
+#define CP_PACKET3( pkt, n )						\
+	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
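
A CP_PACKET0 header carries the target register as a dword index in its low bits and the count of extra payload dwords in bits 16 and up, so header plus payload forms one self-describing command. A sketch, assuming RADEON_CP_PACKET0 is 0x00000000 and RADEON_WAIT_UNTIL is 0x1720 as defined earlier in this header:

	/* one header dword followed by exactly one payload dword
	 * (n == 0 means no dwords beyond the first payload dword) */
	uint32_t hdr = CP_PACKET0(RADEON_WAIT_UNTIL, 0);	/* (0 << 16) | (0x1720 >> 2) */
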
+
+/* ================================================================
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_IDLE() do {					\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( RADEON_WAIT_CRTC_PFLIP );				\
+} while (0)
+
+#define RADEON_FLUSH_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_RB3D_DC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);	\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);	\
+	}                                                               \
+} while (0)
+
+#define RADEON_FLUSH_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);			\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);				\
+	}                                                               \
+} while (0)
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+/* Perfbox functionality only.
+ */
+#define RING_SPACE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {		\
+		u32 head = GET_RING_HEAD( dev_priv );			\
+		if (head == dev_priv->ring.tail)			\
+			dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;	\
+	}								\
+} while (0)
+
+#define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
+do {								\
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;	\
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;	\
+	if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {		\
+		int __ret;						\
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \
+			__ret = r600_do_cp_idle(dev_priv);		\
+		else							\
+			__ret = radeon_do_cp_idle(dev_priv);		\
+		if ( __ret ) return __ret;				\
+		sarea_priv->last_dispatch = 0;				\
+		radeon_freelist_reset( dev );				\
+	}								\
+} while (0)
+
+#define RADEON_DISPATCH_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_FRAME_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_CLEAR_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define R600_DISPATCH_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+#define R600_FRAME_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+#define R600_CLEAR_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define RADEON_VERBOSE	0
+
+#define RING_LOCALS	int write, _nr, _align_nr; unsigned int mask; u32 *ring;
+
+#define RADEON_RING_ALIGN 16
+
+#define BEGIN_RING( n ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\
+	}								\
+	_align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1));	\
+	_align_nr += n;							\
+	if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) {	\
+                COMMIT_RING();						\
+		radeon_wait_ring( dev_priv, _align_nr * sizeof(u32));	\
+	}								\
+	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+#define ADVANCE_RING() do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
+			  write, dev_priv->ring.tail );			\
+	}								\
+	if (((dev_priv->ring.tail + _nr) & mask) != write) {		\
+		DRM_ERROR(						\
+			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\
+			((dev_priv->ring.tail + _nr) & mask),		\
+			write, __LINE__);				\
+	} else								\
+		dev_priv->ring.tail = write;				\
+} while (0)
+
+extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
+
+#define COMMIT_RING() do {						\
+		radeon_commit_ring(dev_priv);				\
+	} while(0)
+
+#define OUT_RING( x ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			   (unsigned int)(x), write );			\
+	}								\
+	ring[write++] = (x);						\
+	write &= mask;							\
+} while (0)
+
+#define OUT_RING_REG( reg, val ) do {					\
+	OUT_RING( CP_PACKET0( reg, 0 ) );				\
+	OUT_RING( val );						\
+} while (0)
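
A typical emission strings these macros together: reserve space, write dwords, validate and publish the new tail, then kick the hardware. A minimal sketch, assuming dev_priv in scope and age as a placeholder value:

	RING_LOCALS;

	BEGIN_RING(2);					/* reserve two dwords */
	OUT_RING_REG(RADEON_LAST_FRAME_REG, age);	/* packet0 header + value */
	ADVANCE_RING();					/* sanity-check and publish the new tail */
	COMMIT_RING();					/* make the writes visible to the CP */
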
+
+#define OUT_RING_TABLE( tab, sz ) do {					\
+	int _size = (sz);					\
+	int *_tab = (int *)(tab);				\
+								\
+	if (write + _size > mask) {				\
+		int _i = (mask+1) - write;			\
+		_size -= _i;					\
+		while (_i > 0 ) {				\
+			*(int *)(ring + write) = *_tab++;	\
+			write++;				\
+			_i--;					\
+		}						\
+		write = 0;					\
+		_tab += _i;					\
+	}							\
+	while (_size > 0) {					\
+		*(ring + write) = *_tab++;			\
+		write++;					\
+		_size--;					\
+	}							\
+	write &= mask;						\
+} while (0)
+
+/**
+ * Copy the given number of dwords from the DRM buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do {				\
+	int _size = (sz) * 4;						\
+	struct drm_buffer *_buf = (buf);				\
+	int _part_size;							\
+	while (_size > 0) {						\
+		_part_size = _size;					\
+									\
+		if (write + _part_size/4 > mask)			\
+			_part_size = ((mask + 1) - write)*4;		\
+									\
+		if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE)	\
+			_part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+									\
+		memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)]	\
+			[drm_buffer_index(_buf)], _part_size);		\
+									\
+		_size -= _part_size;					\
+		write = (write + _part_size/4) & mask;			\
+		drm_buffer_advance(_buf, _part_size);			\
+	}								\
+} while (0)
+
+
+#endif				/* __RADEON_DRV_H__ */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_drv.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_encoders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_encoders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_encoders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,382 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_encoders.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		else
+			index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
+uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ret = 0;
+
+	switch (supported_device) {
+	case ATOM_DEVICE_CRT1_SUPPORT:
+	case ATOM_DEVICE_TV1_SUPPORT:
+	case ATOM_DEVICE_TV2_SUPPORT:
+	case ATOM_DEVICE_CRT2_SUPPORT:
+	case ATOM_DEVICE_CV_SUPPORT:
+		switch (dac) {
+		case 1: /* dac a */
+			if ((rdev->family == CHIP_RS300) ||
+			    (rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480))
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			else if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
+			break;
+		case 2: /* dac b */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
+			else {
+				/*if (rdev->family == CHIP_R200)
+				  ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+				  else*/
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			}
+			break;
+		case 3: /* external dac */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+			break;
+		}
+		break;
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		if ((rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480))
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_LCD2_SUPPORT:
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		break;
+	}
+
+	return ret;
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* walk the list and link encoders to connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->devices & radeon_connector->devices) {
+				drm_mode_connector_attach_encoder(connector, encoder);
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+					if (rdev->is_atom_bios)
+						radeon_atom_backlight_init(radeon_encoder, connector);
+					else
+						radeon_legacy_backlight_init(radeon_encoder, connector);
+					rdev->mode_info.bl_encoder = radeon_encoder;
+				}
+			}
+		}
+	}
+}
+
+void radeon_encoder_set_active_device(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
+			DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+				  radeon_encoder->active_device, radeon_encoder->devices,
+				  radeon_connector->devices, encoder->encoder_type);
+		}
+	}
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->active_device & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->devices & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *other_encoder;
+	struct radeon_encoder *other_radeon_encoder;
+
+	if (radeon_encoder->is_ext_encoder)
+		return NULL;
+
+	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+		if (other_encoder == encoder)
+			continue;
+		other_radeon_encoder = to_radeon_encoder(other_encoder);
+		if (other_radeon_encoder->is_ext_encoder &&
+		    (radeon_encoder->devices & other_radeon_encoder->devices))
+			return other_encoder;
+	}
+	return NULL;
+}
+
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+{
+	struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
+
+	if (other_encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			return ENCODER_OBJECT_ID_NONE;
+		}
+	}
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+	unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+	unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+	unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+	unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+	unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+	adjusted_mode->clock = native_mode->clock;
+	adjusted_mode->flags = native_mode->flags;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->hdisplay = native_mode->hdisplay;
+		adjusted_mode->vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->htotal = native_mode->hdisplay + hblank;
+	adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+	adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+	adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+	adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+	adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+		adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+	adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+
+}
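
A hypothetical example of the fixup: with a native panel mode of 1920x1200 at 60 Hz and a requested mode of 1280x1024, the adjusted mode keeps the panel's pixel clock and blanking intervals, and on AVIVO parts the active size is forced to 1920x1200 as well, leaving the 1280x1024 source to the panel scaler.
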
+
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		} else
+			return false;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return false;
+		else {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		}
+	default:
+		return false;
+	}
+}
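
The thresholds encode the TMDS link budget: pixel_clock is in kHz, single-link DVI/HDMI tops out at 165 MHz, and DCE6 parts driving an HDMI 1.3 sink can push a single link to 340 MHz. Hypothetical values, assuming an encoder in scope:

	/* 1920x1080@60, ~148.5 MHz: single link is enough on any part */
	radeon_dig_monitor_is_duallink(encoder, 148500);	/* false */
	/* 2560x1600@60, ~268.5 MHz: dual link, unless DCE6 + HDMI 1.3 sink */
	radeon_dig_monitor_is_duallink(encoder, 268500);
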
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_encoders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_family.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_family.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_family.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,118 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_family.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* This file defines the CHIP_ and family flags used in the pciids;
+ * it is shared between the KMS and non-KMS code because duplicating it
+ * and then updating only one copy is a recipe for failure.
+ */
+#ifndef RADEON_FAMILY_H
+#define RADEON_FAMILY_H
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+	CHIP_R100 = 0,
+	CHIP_RV100,
+	CHIP_RS100,
+	CHIP_RV200,
+	CHIP_RS200,
+	CHIP_R200,
+	CHIP_RV250,
+	CHIP_RS300,
+	CHIP_RV280,
+	CHIP_R300,
+	CHIP_R350,
+	CHIP_RV350,
+	CHIP_RV380,
+	CHIP_R420,
+	CHIP_R423,
+	CHIP_RV410,
+	CHIP_RS400,
+	CHIP_RS480,
+	CHIP_RS600,
+	CHIP_RS690,
+	CHIP_RS740,
+	CHIP_RV515,
+	CHIP_R520,
+	CHIP_RV530,
+	CHIP_RV560,
+	CHIP_RV570,
+	CHIP_R580,
+	CHIP_R600,
+	CHIP_RV610,
+	CHIP_RV630,
+	CHIP_RV670,
+	CHIP_RV620,
+	CHIP_RV635,
+	CHIP_RS780,
+	CHIP_RS880,
+	CHIP_RV770,
+	CHIP_RV730,
+	CHIP_RV710,
+	CHIP_RV740,
+	CHIP_CEDAR,
+	CHIP_REDWOOD,
+	CHIP_JUNIPER,
+	CHIP_CYPRESS,
+	CHIP_HEMLOCK,
+	CHIP_PALM,
+	CHIP_SUMO,
+	CHIP_SUMO2,
+	CHIP_BARTS,
+	CHIP_TURKS,
+	CHIP_CAICOS,
+	CHIP_CAYMAN,
+	CHIP_ARUBA,
+	CHIP_TAHITI,
+	CHIP_PITCAIRN,
+	CHIP_VERDE,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+	RADEON_FAMILY_MASK = 0x0000ffffUL,
+	RADEON_FLAGS_MASK = 0xffff0000UL,
+	RADEON_IS_MOBILITY = 0x00010000UL,
+	RADEON_IS_IGP = 0x00020000UL,
+	RADEON_SINGLE_CRTC = 0x00040000UL,
+	RADEON_IS_AGP = 0x00080000UL,
+	RADEON_HAS_HIERZ = 0x00100000UL,
+	RADEON_IS_PCIE = 0x00200000UL,
+	RADEON_NEW_MEMMAP = 0x00400000UL,
+	RADEON_IS_PCI = 0x00800000UL,
+	RADEON_IS_IGPGART = 0x01000000UL,
+};
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_family.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_fb.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_fb.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_fb.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,404 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_fb.c 300060 2016-05-17 15:18:01Z pfg $");
+
+#include <machine/_inttypes.h>
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+
+#include <dev/drm2/drm_fb_helper.h>
+
+/* Object hierarchy:
+   this contains a helper and a radeon framebuffer;
+   the helper contains a pointer to the radeon framebuffer baseclass.
+*/
+struct radeon_fbdev {
+	struct drm_fb_helper helper;
+	struct radeon_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct radeon_device *rdev;
+};
+
+#if defined(__linux__)
+static struct fb_ops radeonfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+#endif
+
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+{
+	int aligned = width;
+	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
+	int pitch_mask = 0;
+
+	switch (bpp / 8) {
+	case 1:
+		pitch_mask = align_large ? 255 : 127;
+		break;
+	case 2:
+		pitch_mask = align_large ? 127 : 31;
+		break;
+	case 3:
+	case 4:
+		pitch_mask = align_large ? 63 : 15;
+		break;
+	}
+
+	aligned += pitch_mask;
+	aligned &= ~pitch_mask;
+	return aligned;
+}
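
A worked example with hypothetical numbers: at 32 bpp (bpp / 8 == 4) an AVIVO part uses pitch_mask = 63, so a width of 1366 rounds up to (1366 + 63) & ~63 = 1408 pixels, while an older untiled part uses pitch_mask = 15 and the same width rounds to 1376.

	int pitch = radeon_align_pitch(rdev, 1366, 32, false);	/* 1408 on AVIVO */
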
+
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+	int ret;
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (likely(ret == 0)) {
+		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+					 struct drm_mode_fb_cmd2 *mode_cmd,
+					 struct drm_gem_object **gobj_p)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	bool fb_tiled = false; /* useful for testing */
+	u32 tiling_flags = 0;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	/* need to align pitch with crtc limits */
+	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+						  fb_tiled) * ((bpp + 1) / 8);
+
+	if (rdev->family >= CHIP_R600)
+		height = roundup2(mode_cmd->height, 8);
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = roundup2(size, PAGE_SIZE);
+	ret = radeon_gem_object_create(rdev, aligned_size, 0,
+				       RADEON_GEM_DOMAIN_VRAM,
+				       false, true,
+				       &gobj);
+	if (ret) {
+		DRM_ERROR("failed to allocate framebuffer (%d)\n",
+		       aligned_size);
+		return -ENOMEM;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+
+	if (fb_tiled)
+		tiling_flags = RADEON_TILING_MACRO;
+
+#ifdef __BIG_ENDIAN
+	switch (bpp) {
+	case 32:
+		tiling_flags |= RADEON_TILING_SWAP_32BIT;
+		break;
+	case 16:
+		tiling_flags |= RADEON_TILING_SWAP_16BIT;
+	default:
+		break;
+	}
+#endif
+
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+						 tiling_flags | RADEON_TILING_SURFACE,
+						 mode_cmd->pitches[0]);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
+
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	/* Only 27 bit offset for legacy CRTC */
+	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       NULL);
+	if (ret) {
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, NULL);
+	radeon_bo_unreserve(rbo);
+	if (ret) {
+		goto out_unref;
+	}
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	radeonfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	int ret;
+	unsigned long tmp;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	/* avivo can't scanout real 24bpp */
+	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+		sizes->surface_bpp = 32;
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon object %d\n", ret);
+		return ret;
+	}
+
+	rbo = gem_to_radeon_bo(gobj);
+
+	/* okay, we have an object; now allocate the framebuffer */
+	info = framebuffer_alloc();
+	if (info == NULL) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+	if (ret) {
+		DRM_ERROR("failed to initialise framebuffer %d\n", ret);
+		goto out_unref;
+	}
+
+	fb = &rfbdev->rfb.base;
+
+	/* setup helper */
+	rfbdev->helper.fb = fb;
+	rfbdev->helper.fbdev = info;
+
+	memset(rbo->kptr, 0x0, radeon_bo_size(rbo));
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
+	info->fb_size  = radeon_bo_size(rbo);
+	info->fb_bpp = sizes->surface_bpp;
+	info->fb_pbase = rdev->mc.aper_base + tmp;
+	info->fb_vbase = (vm_offset_t)rbo->kptr;
+
+	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	DRM_INFO("fb mappable at 0x%" PRIXPTR "\n",  info->fb_pbase);
+	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
+	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
+	DRM_INFO("fb depth is %d\n", fb->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	return 0;
+
+out_unref:
+	if (rbo) {
+		/* TODO? dumbbell@ */
+	}
+	if (fb && ret) {
+		drm_gem_object_unreference(gobj);
+		drm_framebuffer_cleanup(fb);
+		free(fb, DRM_MEM_DRIVER); /* XXX malloc'd in radeon_user_framebuffer_create? */
+	}
+	return ret;
+}
+
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+					   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = radeonfb_create(rfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+{
+	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+}
+
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+{
+	struct fb_info *info;
+	struct radeon_framebuffer *rfb = &rfbdev->rfb;
+
+	if (rfbdev->helper.fbdev) {
+		info = rfbdev->helper.fbdev;
+		framebuffer_release(info);
+	}
+
+	if (rfb->obj) {
+		radeonfb_destroy_pinned_object(rfb->obj);
+		rfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&rfbdev->helper);
+	drm_framebuffer_cleanup(&rfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+	.gamma_set = radeon_crtc_fb_gamma_set,
+	.gamma_get = radeon_crtc_fb_gamma_get,
+	.fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+	struct radeon_fbdev *rfbdev;
+	int bpp_sel = 32;
+	int ret;
+
+	/* select 8 bpp console on RN50 or cards with 32MB or less VRAM */
+	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 8;
+
+	rfbdev = malloc(sizeof(struct radeon_fbdev),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!rfbdev)
+		return -ENOMEM;
+
+	rfbdev->rdev = rdev;
+	rdev->mode_info.rfbdev = rfbdev;
+	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+				 rdev->num_crtc,
+				 RADEONFB_CONN_LIMIT);
+	if (ret) {
+		free(rfbdev, DRM_MEM_DRIVER);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+	if (!rdev->mode_info.rfbdev)
+		return;
+
+	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+	free(rdev->mode_info.rfbdev, DRM_MEM_DRIVER);
+	rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+#ifdef FREEBSD_WIP
+	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+#endif /* FREEBSD_WIP */
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+	struct radeon_bo *robj;
+	int size = 0;
+
+	robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
+	size += radeon_bo_size(robj);
+	return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+		return true;
+	return false;
+}
+
+struct fb_info *
+radeon_fb_helper_getinfo(device_t kdev)
+{
+	struct drm_device *dev;
+	struct radeon_device *rdev;
+	struct radeon_fbdev *rfbdev;
+	struct fb_info *info;
+
+	dev = device_get_softc(kdev);
+	rdev = dev->dev_private;
+	rfbdev = rdev->mode_info.rfbdev;
+	if (rfbdev == NULL)
+		return (NULL);
+
+	info = rfbdev->helper.fbdev;
+
+	return (info);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_fb.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_fence.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_fence.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_fence.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,988 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse at freedesktop.org>
+ *    Dave Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_fence.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#ifdef FREEBSD_WIP
+#include "radeon_trace.h"
+#endif /* FREEBSD_WIP */
+
+/*
+ * Fences
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization.  When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.  Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
+
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		*drv->cpu_addr = cpu_to_le32(seq);
+	} else {
+		WREG32(drv->scratch_reg, seq);
+	}
+}
+
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	u32 seq = 0;
+
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		seq = le32_to_cpu(*drv->cpu_addr);
+	} else {
+		seq = RREG32(drv->scratch_reg);
+	}
+	return seq;
+}
+
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+		      struct radeon_fence **fence,
+		      int ring)
+{
+	/* we are protected by the ring emission mutex */
+	*fence = malloc(sizeof(struct radeon_fence), DRM_MEM_DRIVER, M_NOWAIT);
+	if ((*fence) == NULL) {
+		return -ENOMEM;
+	}
+	refcount_init(&((*fence)->kref), 1);
+	(*fence)->rdev = rdev;
+	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+	(*fence)->ring = ring;
+	radeon_fence_ring_emit(rdev, ring, *fence);
+	CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%d)", ring, (*fence)->seq);
+	return 0;
+}
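
A typical consumer emits a fence after queuing work on a ring and later blocks on it; the sequence counter itself is protected by the ring emission mutex, as noted above. A minimal sketch, assuming rdev in scope and the GFX ring index:

	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r == 0)
		r = radeon_fence_wait(fence, false);	/* non-interruptible wait */
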
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop, but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process needs to be interrupted by another process, and that
+	 * other process needs to update last_seq between the atomic read
+	 * and the xchg of the current process.
+	 *
+	 * Moreover, for this to become an infinite loop, new fences need
+	 * to be signaled continuously, i.e. radeon_fence_read needs to
+	 * return a different value each time for both the currently
+	 * polling process and the other process that updates last_seq
+	 * between the atomic read and xchg of the current process. And
+	 * the value the other process sets as last_seq must be higher
+	 * than the seq value we just read, which means the current
+	 * process needs to be interrupted after radeon_fence_read and
+	 * before the atomic xchg.
+	 *
+	 * To be even safer we count the number of times we loop and
+	 * bail out after 10 iterations, accepting the fact that we might
+	 * have temporarily set last_seq not to the true last seq but to
+	 * an older one.
+	 */
+	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+	do {
+		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
+		seq = radeon_fence_read(rdev, ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop over we don't want to return without
+		 * checking if a fence is signaled as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped over too many times; leave with the
+			 * fact that we might have set an older fence
+			 * seq than the current real last seq as signaled
+			 * by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+	if (wake) {
+		rdev->fence_drv[ring].last_activity = jiffies;
+		cv_broadcast(&rdev->fence_queue);
+	}
+}
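
The hardware slot only holds the low 32 bits of the sequence number, so the loop splices the high word back in from last_seq and, when the low word appears to have wrapped, takes it from last_emitted instead. A worked example with hypothetical values:

	/* last_seq     = 0x1fffffffeULL          (software side, 64-bit)
	 * hw read      = 0x00000003              (radeon_fence_read)
	 * spliced seq  = 0x100000003 < last_seq  -> low word wrapped, so
	 * take the high word from last_emitted (say 0x200000005), giving
	 * seq = 0x200000003, which is > last_seq and <= last_emitted:
	 * the new value is accepted and the fence queue is woken. */
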
+
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
+static void radeon_fence_destroy(struct radeon_fence *fence)
+{
+
+	free(fence, DRM_MEM_DRIVER);
+}
+
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).  Helper function for
+ * radeon_fence_signaled().
+ */
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+				      u64 seq, unsigned ring)
+{
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	/* poll new last sequence at least once */
+	radeon_fence_process(rdev, ring);
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+	if (!fence) {
+		return true;
+	}
+	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+		return true;
+	}
+	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+				 unsigned ring, bool intr, bool lock_ring)
+{
+	unsigned long timeout, last_activity;
+	uint64_t seq;
+	unsigned i;
+	bool signaled, fence_queue_locked;
+	int r;
+
+	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+		if (!rdev->ring[ring].ready) {
+			return -EBUSY;
+		}
+
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = rdev->fence_drv[ring].last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+			 * either way we will just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+		/* Save current last activity value, used to check for GPU lockups */
+		last_activity = rdev->fence_drv[ring].last_activity;
+
+		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
+		    ring, seq);
+
+		radeon_irq_kms_sw_irq_get(rdev, ring);
+		fence_queue_locked = false;
+		r = 0;
+		while (!(signaled = radeon_fence_seq_signaled(rdev,
+		    target_seq, ring))) {
+			if (!fence_queue_locked) {
+				mtx_lock(&rdev->fence_queue_mtx);
+				fence_queue_locked = true;
+			}
+			if (intr) {
+				r = cv_timedwait_sig(&rdev->fence_queue,
+				    &rdev->fence_queue_mtx,
+				    timeout);
+			} else {
+				r = cv_timedwait(&rdev->fence_queue,
+				    &rdev->fence_queue_mtx,
+				    timeout);
+			}
+			if (r == EINTR)
+				r = ERESTARTSYS;
+			if (r != 0) {
+				if (r == EWOULDBLOCK) {
+					signaled =
+					    radeon_fence_seq_signaled(
+						rdev, target_seq, ring);
+				}
+				break;
+			}
+		}
+		if (fence_queue_locked) {
+			mtx_unlock(&rdev->fence_queue_mtx);
+		}
+		radeon_irq_kms_sw_irq_put(rdev, ring);
+		if (unlikely(r == ERESTARTSYS)) {
+			return -r;
+		}
+		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
+		    ring, seq);
+
+		if (unlikely(!signaled)) {
+#ifndef __FreeBSD__
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+#endif
+
+			/* check if sequence value has changed since last_activity */
+			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+				continue;
+			}
+
+			if (lock_ring) {
+				sx_xlock(&rdev->ring_lock);
+			}
+
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != rdev->fence_drv[ring].last_activity) {
+				if (lock_ring) {
+					sx_xunlock(&rdev->ring_lock);
+				}
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
+					 (uintmax_t)target_seq, (uintmax_t)seq);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				if (lock_ring) {
+					sx_xunlock(&rdev->ring_lock);
+				}
+				return -EDEADLK;
+			}
+
+			if (lock_ring) {
+				sx_xunlock(&rdev->ring_lock);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+	int r;
+
+	if (fence == NULL) {
+		DRM_ERROR("Querying an invalid fence : %p !\n", fence);
+		return -EINVAL;
+	}
+
+	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+				  fence->ring, intr, true);
+	if (r) {
+		return r;
+	}
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
+
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+				     u64 *target_seq, bool intr)
+{
+	unsigned long timeout, last_activity, tmp;
+	unsigned i, ring = RADEON_NUM_RINGS;
+	bool signaled, fence_queue_locked;
+	int r;
+
+	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i]) {
+			continue;
+		}
+
+		/* use the most recent one as indicator */
+		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+			last_activity = rdev->fence_drv[i].last_activity;
+		}
+
+		/* For lockup detection just pick the lowest ring we are
+		 * actively waiting for
+		 */
+		if (i < ring) {
+			ring = i;
+		}
+	}
+
+	/* nothing to wait for? */
+	if (ring == RADEON_NUM_RINGS) {
+		return -ENOENT;
+	}
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+			 * either way we will just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+
+		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
+		    ring, target_seq[ring]);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_get(rdev, i);
+			}
+		}
+		fence_queue_locked = false;
+		r = 0;
+		while (!(signaled = radeon_fence_any_seq_signaled(rdev,
+		    target_seq))) {
+			if (!fence_queue_locked) {
+				mtx_lock(&rdev->fence_queue_mtx);
+				fence_queue_locked = true;
+			}
+			if (intr) {
+				r = cv_timedwait_sig(&rdev->fence_queue,
+				    &rdev->fence_queue_mtx,
+				    timeout);
+			} else {
+				r = cv_timedwait(&rdev->fence_queue,
+				    &rdev->fence_queue_mtx,
+				    timeout);
+			}
+			if (r == EINTR)
+				r = ERESTARTSYS;
+			if (r != 0) {
+				if (r == EWOULDBLOCK) {
+					signaled =
+					    radeon_fence_any_seq_signaled(
+						rdev, target_seq);
+				}
+				break;
+			}
+		}
+		if (fence_queue_locked) {
+			mtx_unlock(&rdev->fence_queue_mtx);
+		}
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_put(rdev, i);
+			}
+		}
+		if (unlikely(r == ERESTARTSYS)) {
+			return -r;
+		}
+		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
+		    ring, target_seq[ring]);
+
+		if (unlikely(!signaled)) {
+#ifndef __FreeBSD__
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+#endif
+
+			sx_xlock(&rdev->ring_lock);
+			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+					tmp = rdev->fence_drv[i].last_activity;
+				}
+			}
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != tmp) {
+				last_activity = tmp;
+				sx_xunlock(&rdev->ring_lock);
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
+					 (uintmax_t)target_seq[ring]);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				sx_xunlock(&rdev->ring_lock);
+				return -EDEADLK;
+			}
+			sx_xunlock(&rdev->ring_lock);
+		}
+	}
+	return 0;
+}
+
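+/*
+ * The timeout math in the wait loop above, restated as a sketch
+ * (this assumes RADEON_FENCE_JIFFIES_TIMEOUT is roughly 500ms worth
+ * of jiffies; the variable names are illustrative, not driver code):
+ *
+ *   window_start = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ *   if (time_after(last_activity, window_start))
+ *           timeout = last_activity - window_start;  (rest of the window)
+ *   else
+ *           timeout = 1;  (window expired: poll, then run the lockup check)
+ *
+ * E.g. with HZ == 1000 and last_activity == jiffies - 200, the wait is
+ * 300 jiffies (300ms) before a lockup is suspected.
+ */
+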
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics).  Fence
+ * array is indexed by ring id.  @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+			/* something was already signaled */
+			return 0;
+		}
+
+		seq[i] = fences[i]->seq;
+	}
+
+	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	if (r) {
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq;
+
+	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+		/* nothing to wait for, last_seq is
+		   already the last emitted fence */
+		return -ENOENT;
+	}
+	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	int r;
+
+	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	if (r) {
+		if (r == -EDEADLK) {
+			return -EDEADLK;
+		}
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+			ring, r);
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+	refcount_acquire(&fence->kref);
+	return fence;
+}
+
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+	struct radeon_fence *tmp = *fence;
+
+	*fence = NULL;
+	if (tmp) {
+		if (refcount_release(&tmp->kref)) {
+			radeon_fence_destroy(tmp);
+		}
+	}
+}
+
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring.  Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report slightly wrong fence count here.
+	 */
+	radeon_fence_process(rdev, ring);
+	emitted = rdev->fence_drv[ring].sync_seq[ring]
+		- atomic64_read(&rdev->fence_drv[ring].last_seq);
+	/* to avoid 32-bit wraparound */
+	if (emitted > 0x10000000) {
+		emitted = 0x10000000;
+	}
+	return (unsigned)emitted;
+}
+
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics).  If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *fdrv;
+
+	if (!fence) {
+		return false;
+	}
+
+	if (fence->ring == dst_ring) {
+		return false;
+	}
+
+	/* we are protected by the ring mutex */
+	fdrv = &fence->rdev->fence_drv[dst_ring];
+	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *dst, *src;
+	unsigned i;
+
+	if (!fence) {
+		return;
+	}
+
+	if (fence->ring == dst_ring) {
+		return;
+	}
+
+	/* we are protected by the ring mutex */
+	src = &fence->rdev->fence_drv[fence->ring];
+	dst = &fence->rdev->fence_drv[dst_ring];
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (i == dst_ring) {
+			continue;
+		}
+		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+	}
+}
+
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+{
+	uint64_t index;
+	int r;
+
+	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
+		rdev->fence_drv[ring].scratch_reg = 0;
+		index = R600_WB_EVENT_OFFSET + ring * 4;
+	} else {
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			return r;
+		}
+		index = RADEON_WB_SCRATCH_OFFSET +
+			rdev->fence_drv[ring].scratch_reg -
+			rdev->scratch.reg_base;
+	}
+	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
+	rdev->fence_drv[ring].initialized = true;
+	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016jx and cpu addr 0x%p\n",
+		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+	return 0;
+}
+
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+	int i;
+
+	rdev->fence_drv[ring].scratch_reg = -1;
+	rdev->fence_drv[ring].cpu_addr = NULL;
+	rdev->fence_drv[ring].gpu_addr = 0;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		rdev->fence_drv[ring].sync_seq[i] = 0;
+	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+	rdev->fence_drv[ring].last_activity = jiffies;
+	rdev->fence_drv[ring].initialized = false;
+}
+
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+	int ring;
+
+	mtx_init(&rdev->fence_queue_mtx,
+	    "drm__radeon_device__fence_queue_mtx", NULL, MTX_DEF);
+	cv_init(&rdev->fence_queue, "drm__radeon_device__fence_queue");
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		radeon_fence_driver_init_ring(rdev, ring);
+	}
+	if (radeon_debugfs_fence_init(rdev)) {
+		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+	}
+	return 0;
+}
+
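+/*
+ * Typical bring-up order, sketched for a single ring (the call site in
+ * the asic startup path is illustrative; both functions are defined
+ * above):
+ *
+ *   r = radeon_fence_driver_init(rdev);    once per device
+ *   ...
+ *   r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ *                                           per ring, after wb is set up
+ */
+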
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+	int ring, r;
+
+	sx_xlock(&rdev->ring_lock);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		r = radeon_fence_wait_empty_locked(rdev, ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			radeon_fence_driver_force_completion(rdev);
+		}
+		cv_broadcast(&rdev->fence_queue);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		rdev->fence_drv[ring].initialized = false;
+	}
+	cv_destroy(&rdev->fence_queue);
+	sx_xunlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure, make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+	}
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i, j;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%016llx\n",
+			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+		seq_printf(m, "Last emitted        0x%016llx\n",
+			   rdev->fence_drv[i].sync_seq[i]);
+
+		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+			if (i != j && rdev->fence_drv[j].initialized)
+				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+					   j, rdev->fence_drv[i].sync_seq[j]);
+		}
+	}
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+#else
+	return 0;
+#endif
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_fence.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_gart.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_gart.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_gart.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1310 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_gart.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+
+/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
+
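+/*
+ * Illustrative translation example (numbers assumed, not taken from any
+ * particular asic): with 4KB GPU pages, a GPU access at gart aperture
+ * offset 0x6000 is served by GART entry 6, which holds the bus address
+ * of the backing system page; gart_table below is a stand-in for the
+ * hardware table:
+ *
+ *   entry    = offset / RADEON_GPU_PAGE_SIZE;    (0x6000 -> 6)
+ *   bus_addr = gart_table[entry] +
+ *              (offset & (RADEON_GPU_PAGE_SIZE - 1));
+ */
+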
+/*
+ * Common GART table functions.
+ */
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+	drm_dma_handle_t *dmah;
+
+	dmah = drm_pci_alloc(rdev->ddev, rdev->gart.table_size,
+	    PAGE_SIZE, BUS_SPACE_MAXADDR);
+	if (dmah == NULL) {
+		return -ENOMEM;
+	}
+	rdev->gart.dmah = dmah;
+	rdev->gart.ptr = dmah->vaddr;
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		pmap_change_attr((vm_offset_t)rdev->gart.ptr,
+		    rdev->gart.table_size >> PAGE_SHIFT, PAT_UNCACHED);
+	}
+#endif
+	rdev->gart.table_addr = dmah->busaddr;
+	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
+	return 0;
+}
+
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		pmap_change_attr((vm_offset_t)rdev->gart.ptr,
+		    rdev->gart.table_size >> PAGE_SHIFT, PAT_WRITE_COMBINING);
+	}
+#endif
+	drm_pci_free(rdev->ddev, rdev->gart.dmah);
+	rdev->gart.dmah = NULL;
+	rdev->gart.ptr = NULL;
+	rdev->gart.table_addr = 0;
+}
+
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->gart.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->gart.robj,
+				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->gart.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->gart.robj);
+	radeon_bo_unreserve(rdev->gart.robj);
+	rdev->gart.table_addr = gpu_addr;
+	return r;
+}
+
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->gart.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
+		rdev->gart.ptr = NULL;
+	}
+}
+
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	radeon_gart_table_vram_unpin(rdev);
+	radeon_bo_unref(&rdev->gart.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+	u64 page_base;
+
+	if (!rdev->gart.ready) {
+		DRM_ERROR("trying to unbind memory from uninitialized GART!\n");
+		return;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+		if (rdev->gart.pages[p]) {
+			rdev->gart.pages[p] = NULL;
+			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				if (rdev->gart.ptr) {
+					radeon_gart_set_page(rdev, t, page_base);
+				}
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, vm_page_t *pagelist, dma_addr_t *dma_addr)
+{
+	unsigned t;
+	unsigned p;
+	uint64_t page_base;
+	int i, j;
+
+	if (!rdev->gart.ready) {
+		DRM_ERROR("trying to bind memory to uninitialized GART!\n");
+		return -EINVAL;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+
+	for (i = 0; i < pages; i++, p++) {
+		rdev->gart.pages_addr[p] = dma_addr[i];
+		rdev->gart.pages[p] = pagelist[i];
+		if (rdev->gart.ptr) {
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+	return 0;
+}
+
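+/*
+ * The index math shared by bind/unbind above, with illustrative values
+ * (RADEON_GPU_PAGE_SIZE assumed to be 4KB):
+ *
+ *   t = offset / RADEON_GPU_PAGE_SIZE;           (GPU page/table index)
+ *   p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);  (CPU page index)
+ *
+ * With 4KB CPU pages, offset 0x3000 gives t = 3 and p = 3.  On a
+ * hypothetical 16KB PAGE_SIZE kernel the same offset gives t = 3 and
+ * p = 0, and each bound CPU page fills four consecutive GART entries
+ * (the inner j loop above).
+ */
+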
+/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+	int i, j, t;
+	u64 page_base;
+
+	if (!rdev->gart.ptr) {
+		return;
+	}
+	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+		page_base = rdev->gart.pages_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			radeon_gart_set_page(rdev, t, page_base);
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_init(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.pages) {
+		return 0;
+	}
+	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = radeon_dummy_page_init(rdev);
+	if (r)
+		return r;
+	/* Compute table size */
+	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+	/* Allocate pages table */
+	rdev->gart.pages = malloc(sizeof(void *) * rdev->gart.num_cpu_pages,
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (rdev->gart.pages == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	rdev->gart.pages_addr = malloc(sizeof(dma_addr_t) *
+					rdev->gart.num_cpu_pages,
+					DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (rdev->gart.pages_addr == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+	}
+	return 0;
+}
+
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+		/* unbind pages */
+		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+	}
+	rdev->gart.ready = false;
+	free(rdev->gart.pages, DRM_MEM_DRIVER);
+	free(rdev->gart.pages_addr, DRM_MEM_DRIVER);
+	rdev->gart.pages = NULL;
+	rdev->gart.pages_addr = NULL;
+
+	radeon_dummy_page_fini(rdev);
+}
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics; however,
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
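+/*
+ * Two-level translation sketch for a GPU virtual address va (the
+ * snippet is illustrative; it relies on RADEON_VM_PTE_COUNT being
+ * 1 << RADEON_VM_BLOCK_SIZE):
+ *
+ *   pfn = va / RADEON_GPU_PAGE_SIZE;
+ *   pde = pfn >> RADEON_VM_BLOCK_SIZE;        (page directory index)
+ *   pte = pfn & (RADEON_VM_PTE_COUNT - 1);    (index within that table)
+ */
+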
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
+
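+/*
+ * Worked example with assumed numbers: for max_pfn = 1 << 20 (a 4GB
+ * address space at 4KB GPU pages) and RADEON_VM_BLOCK_SIZE = 9, the
+ * directory holds 1 << 11 = 2048 eight-byte entries, so
+ * radeon_vm_directory_size() returns 16KB (already page aligned).
+ */
+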
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+	struct radeon_vm *vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+	unsigned size;
+
+	if (!rdev->vm_manager.enabled) {
+		/* allocate enough for 2 full VM pts */
+		size = radeon_vm_directory_size(rdev);
+		size += rdev->vm_manager.max_pfn * 8;
+		size *= 2;
+		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+					      RADEON_GPU_PAGE_ALIGN(size),
+					      RADEON_GEM_DOMAIN_VRAM);
+		if (r) {
+			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+				(rdev->vm_manager.max_pfn * 8) >> 10);
+			return r;
+		}
+
+		r = radeon_asic_vm_init(rdev);
+		if (r)
+			return r;
+
+		rdev->vm_manager.enabled = true;
+
+		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+		if (r)
+			return r;
+	}
+
+	/* restore page table */
+	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+		if (vm->page_directory == NULL)
+			continue;
+
+		list_for_each_entry(bo_va, &vm->va, vm_list) {
+			bo_va->valid = false;
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_vm_free_pt - free the page table for a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_free_pt(struct radeon_device *rdev,
+				    struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+	int i;
+
+	if (!vm->page_directory)
+		return;
+
+	list_del_init(&vm->list);
+	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+
+	list_for_each_entry(bo_va, &vm->va, vm_list) {
+		bo_va->valid = false;
+	}
+
+	if (vm->page_tables == NULL)
+		return;
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+	free(vm->page_tables, DRM_MEM_DRIVER);
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+	struct radeon_vm *vm, *tmp;
+	int i;
+
+	if (!rdev->vm_manager.enabled)
+		return;
+
+	sx_xlock(&rdev->vm_manager.lock);
+	/* free all allocated page tables */
+	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+		sx_xlock(&vm->mutex);
+		radeon_vm_free_pt(rdev, vm);
+		sx_xunlock(&vm->mutex);
+	}
+	for (i = 0; i < RADEON_NUM_VM; ++i) {
+		radeon_fence_unref(&rdev->vm_manager.active[i]);
+	}
+	radeon_asic_vm_fini(rdev);
+	sx_xunlock(&rdev->vm_manager.lock);
+
+	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+	rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+
+	if (list_empty(&rdev->vm_manager.lru_vm))
+		return -ENOMEM;
+
+	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+				    struct radeon_vm, list);
+	if (vm_evict == vm)
+		return -ENOMEM;
+
+	sx_xlock(&vm_evict->mutex);
+	radeon_vm_free_pt(rdev, vm_evict);
+	sx_xunlock(&vm_evict->mutex);
+	return 0;
+}
+
+/**
+ * radeon_vm_alloc_pt - allocates a page table for a VM
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Allocate a page table for the requested vm (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	unsigned pd_size, pts_size;
+	u64 *pd_addr;
+	int r;
+
+	if (vm == NULL) {
+		return -EINVAL;
+	}
+
+	if (vm->page_directory != NULL) {
+		return 0;
+	}
+
+retry:
+	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+			     &vm->page_directory, pd_size,
+			     RADEON_GPU_PAGE_SIZE, false);
+	if (r == -ENOMEM) {
+		r = radeon_vm_evict(rdev, vm);
+		if (r)
+			return r;
+		goto retry;
+
+	} else if (r) {
+		return r;
+	}
+
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+	/* Initially clear the page directory */
+	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+	memset(pd_addr, 0, pd_size);
+
+	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+	vm->page_tables = malloc(pts_size, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_add_to_lru - add VM's page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	list_del_init(&vm->list);
+	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring)
+{
+	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+	unsigned choices[2] = {};
+	unsigned i;
+
+	/* check if the id is still valid */
+	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+		return NULL;
+
+	/* we definitely need to flush */
+	radeon_fence_unref(&vm->last_flush);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+		struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+		if (fence == NULL) {
+			/* found a free one */
+			vm->id = i;
+			return NULL;
+		}
+
+		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+			best[fence->ring] = fence;
+			choices[fence->ring == ring ? 0 : 1] = i;
+		}
+	}
+
+	for (i = 0; i < 2; ++i) {
+		if (choices[i]) {
+			vm->id = choices[i];
+			return rdev->vm_manager.active[choices[i]];
+		}
+	}
+
+	/* should never happen */
+	panic("%s: failed to allocate next VMID", __func__);
+	return NULL;
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence)
+{
+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	bo_va = malloc(sizeof(struct radeon_bo_va),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (bo_va == NULL) {
+		return NULL;
+	}
+	bo_va->vm = vm;
+	bo_va->bo = bo;
+	bo_va->soffset = 0;
+	bo_va->eoffset = 0;
+	bo_va->flags = 0;
+	bo_va->valid = false;
+	bo_va->ref_count = 1;
+	INIT_LIST_HEAD(&bo_va->bo_list);
+	INIT_LIST_HEAD(&bo_va->vm_list);
+
+	sx_xlock(&vm->mutex);
+	list_add(&bo_va->vm_list, &vm->va);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	sx_xunlock(&vm->mutex);
+
+	return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t soffset,
+			  uint32_t flags)
+{
+	uint64_t size = radeon_bo_size(bo_va->bo);
+	uint64_t eoffset, last_offset = 0;
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_bo_va *tmp;
+	struct list_head *head;
+	unsigned last_pfn;
+
+	if (soffset) {
+		/* make sure object fit at this offset */
+		eoffset = soffset + size;
+		if (soffset >= eoffset) {
+			return -EINVAL;
+		}
+
+		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+		if (last_pfn > rdev->vm_manager.max_pfn) {
+			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+				last_pfn, rdev->vm_manager.max_pfn);
+			return -EINVAL;
+		}
+
+	} else {
+		eoffset = last_pfn = 0;
+	}
+
+	sx_xlock(&vm->mutex);
+	head = &vm->va;
+	last_offset = 0;
+	list_for_each_entry(tmp, &vm->va, vm_list) {
+		if (bo_va == tmp) {
+			/* skip over currently modified bo */
+			continue;
+		}
+
+		if (soffset >= last_offset && eoffset <= tmp->soffset) {
+			/* bo can be added before this one */
+			break;
+		}
+		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+			/* bo and tmp overlap, invalid offset */
+			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+			sx_xunlock(&vm->mutex);
+			return -EINVAL;
+		}
+		last_offset = tmp->eoffset;
+		head = &tmp->vm_list;
+	}
+
+	bo_va->soffset = soffset;
+	bo_va->eoffset = eoffset;
+	bo_va->flags = flags;
+	bo_va->valid = false;
+	list_move(&bo_va->vm_list, head);
+
+	sx_xunlock(&vm->mutex);
+	return 0;
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+	uint64_t result;
+
+	/* page table offset */
+	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size */
+	/*
+	 * FreeBSD port note: FreeBSD's PAGE_MASK is the inverse of
+	 * Linux's one. That's why the test below doesn't inverse the
+	 * constant.
+	 */
+	result |= addr & (PAGE_MASK);
+
+	return result;
+}
+
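+/*
+ * The PAGE_MASK note above, made concrete (assuming 4KB pages):
+ * FreeBSD's PAGE_MASK is 0xfff (the offset bits) while Linux's is
+ * ~0xfff (the frame bits), so these two forms are equivalent:
+ *
+ *   result |= addr & PAGE_MASK;     (FreeBSD: keep the offset bits)
+ *   result |= addr & ~PAGE_MASK;    (what the Linux original computes)
+ */
+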
+/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
+		if (vm->page_tables[pt_idx])
+			continue;
+
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
+		if ((last_pte + 8 * count) != pte) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pte,	last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and global and local mutex must be locked!
+ */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem)
+{
+	unsigned ridx = rdev->asic->vm.pt_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	struct radeon_semaphore *sem = NULL;
+	struct radeon_bo_va *bo_va;
+	unsigned nptes, npdes, ndw;
+	uint64_t addr;
+	int r;
+
+	/* nothing to do if vm isn't bound */
+	if (vm->page_directory == NULL)
+		return 0;
+
+	bo_va = radeon_vm_bo_find(vm, bo);
+	if (bo_va == NULL) {
+		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+		return -EINVAL;
+	}
+
+	if (!bo_va->soffset) {
+		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+			bo, vm);
+		return -EINVAL;
+	}
+
+	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+		return 0;
+
+	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			bo_va->flags |= RADEON_VM_PAGE_VALID;
+			bo_va->valid = true;
+		}
+		if (mem->mem_type == TTM_PL_TT) {
+			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+		} else {
+			addr += rdev->vm_manager.vram_base_offset;
+		}
+	} else {
+		addr = 0;
+		bo_va->valid = false;
+	}
+
+	if (vm->fence && radeon_fence_signaled(vm->fence)) {
+		radeon_fence_unref(&vm->fence);
+	}
+
+	if (vm->fence && vm->fence->ring != ridx) {
+		r = radeon_semaphore_create(rdev, &sem);
+		if (r) {
+			return r;
+		}
+	}
+
+	nptes = radeon_bo_ngpu_pages(bo);
+
+	/* assume two extra pdes in case the mapping overlaps the borders */
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
+
+	/* estimate number of dw needed */
+	/* semaphore, fence and padding */
+	ndw = 32;
+
+	if (RADEON_VM_BLOCK_SIZE > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 4;
+	else
+		/* reserve space for one header for
+		    every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+	/* reserve space for pte addresses */
+	ndw += nptes * 2;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (npdes >> 11) * 4;
+
+	/* reserve space for pde addresses */
+	ndw += npdes * 2;
+
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		return r;
+	}
+
+	if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+		radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+		radeon_fence_note_sync(vm->fence, ridx);
+	}
+
+	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+			      addr, bo_va->flags);
+
+	radeon_fence_unref(&vm->fence);
+	r = radeon_fence_emit(rdev, &vm->fence, ridx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, vm->fence);
+	radeon_fence_unref(&vm->last_flush);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_bo_va *bo_va)
+{
+	int r;
+
+	sx_xlock(&rdev->vm_manager.lock);
+	sx_xlock(&bo_va->vm->mutex);
+	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	sx_xunlock(&rdev->vm_manager.lock);
+	list_del(&bo_va->vm_list);
+	sx_xunlock(&bo_va->vm->mutex);
+	list_del(&bo_va->bo_list);
+
+	free(bo_va, DRM_MEM_DRIVER);
+	return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		bo_va->valid = false;
+	}
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	vm->id = 0;
+	vm->fence = NULL;
+	sx_init(&vm->mutex, "drm__radeon_vm__mutex");
+	INIT_LIST_HEAD(&vm->list);
+	INIT_LIST_HEAD(&vm->va);
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	sx_xlock(&rdev->vm_manager.lock);
+	sx_xlock(&vm->mutex);
+	radeon_vm_free_pt(rdev, vm);
+	sx_xunlock(&rdev->vm_manager.lock);
+
+	if (!list_empty(&vm->va)) {
+		dev_err(rdev->dev, "still active bo inside vm\n");
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+		list_del_init(&bo_va->vm_list);
+		r = radeon_bo_reserve(bo_va->bo, false);
+		if (!r) {
+			list_del_init(&bo_va->bo_list);
+			radeon_bo_unreserve(bo_va->bo);
+			free(bo_va, DRM_MEM_DRIVER);
+		}
+	}
+	radeon_fence_unref(&vm->fence);
+	radeon_fence_unref(&vm->last_flush);
+	sx_xunlock(&vm->mutex);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_gart.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_gem.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_gem.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_gem.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,589 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_gem.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_gem.h"
+
+int radeon_gem_object_init(struct drm_gem_object *obj)
+{
+	panic("radeon_gem_object_init() must not be called");
+
+	return 0;
+}
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
+
+	if (robj) {
+#ifdef FREEBSD_WIP
+		if (robj->gem_base.import_attach)
+			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+#endif /* FREEBSD_WIP */
+		radeon_bo_unref(&robj);
+	}
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj)
+{
+	struct radeon_bo *robj;
+	unsigned long max_size;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE) {
+		alignment = PAGE_SIZE;
+	}
+
+	/* maximum bo size is the minimum of visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		DRM_ERROR("%s:%d alloc size %dMB bigger than %ldMB limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		}
+		return r;
+	}
+	*obj = &robj->gem_base;
+
+	sx_xlock(&rdev->gem.mutex);
+	list_add_tail(&robj->list, &rdev->gem.objects);
+	sx_xunlock(&rdev->gem.mutex);
+
+	return 0;
+}
+
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
+			  uint32_t rdomain, uint32_t wdomain)
+{
+	struct radeon_bo *robj;
+	uint32_t domain;
+	int r;
+
+	/* FIXME: reimplement */
+	robj = gem_to_radeon_bo(gobj);
+	/* work out where to validate the buffer to */
+	domain = wdomain;
+	if (!domain) {
+		domain = rdomain;
+	}
+	if (!domain) {
+		/* Do nothing */
+		DRM_ERROR("Set domain without domain!\n");
+		return 0;
+	}
+	if (domain == RADEON_GEM_DOMAIN_CPU) {
+		/* Asking for cpu access wait for object idle */
+		r = radeon_bo_wait(robj, NULL, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for object!\n");
+			return r;
+		}
+	}
+	return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+	INIT_LIST_HEAD(&rdev->gem.objects);
+	return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+	radeon_bo_force_delete(rdev);
+}
+
+/*
+ * Called from drm_gem_handle_create(), which appears in both the new
+ * and open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return 0;
+	}
+
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		return r;
+	}
+
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (!bo_va) {
+		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+	} else {
+		++bo_va->ref_count;
+	}
+	radeon_bo_unreserve(rbo);
+
+	return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+			     struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return;
+	}
+
+	r = radeon_bo_reserve(rbo, true);
+	if (r) {
+		dev_err(rdev->dev, "leaking bo va because "
+			"we failed to reserve the bo (%d)\n", r);
+		return;
+	}
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (bo_va) {
+		if (--bo_va->ref_count == 0) {
+			radeon_vm_bo_rmv(rdev, bo_va);
+		}
+	}
+	radeon_bo_unreserve(rbo);
+}
+
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
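+/*
+ * Usage pattern for the helper above (a sketch; the ioctls below follow
+ * it): a -EDEADLK returned by a wait is turned into a GPU reset plus
+ * -EAGAIN, so userspace retries the ioctl once the GPU is back:
+ *
+ *   r = radeon_bo_wait(robj, NULL, false);
+ *   ...
+ *   r = radeon_gem_handle_lockup(rdev, r);
+ *   return r;
+ */
+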
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_info *args = data;
+	struct ttm_mem_type_manager *man;
+	unsigned i;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+
+	args->vram_size = rdev->mc.real_vram_size;
+	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	if (rdev->stollen_vga_memory)
+		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+	args->vram_visible -= radeon_fbdev_total_size(rdev);
+	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->ring[i].ring_size;
+	return 0;
+}
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_create *args = data;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	sx_slock(&rdev->exclusive_lock);
+	/* create a gem object to contain this object in */
+	args->size = roundup(args->size, PAGE_SIZE);
+	r = radeon_gem_object_create(rdev, args->size, args->alignment,
+					args->initial_domain, false,
+					false, &gobj);
+	if (r) {
+		if (r == -ERESTARTSYS)
+			r = -EINTR;
+		sx_sunlock(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r) {
+		sx_sunlock(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	args->handle = handle;
+	sx_sunlock(&rdev->exclusive_lock);
+	return 0;
+}
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	/* transition the BO to a domain -
+	 * just validate the BO into a certain domain */
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_set_domain *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	/* for now if someone requests domain CPU -
+	 * just make sure the buffer is finished with */
+	sx_slock(&rdev->exclusive_lock);
+
+	/* just do a BO wait for now */
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		sx_sunlock(&rdev->exclusive_lock);
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+	drm_gem_object_unreference_unlocked(gobj);
+	sx_sunlock(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
+	return r;
+}
+
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+
+	gobj = drm_gem_object_lookup(dev, filp, handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	*offset_p = radeon_bo_mmap_offset(robj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_mmap *args = data;
+
+	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_busy *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+	uint32_t cur_placement = 0;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_wait(robj, &cur_placement, true);
+	switch (cur_placement) {
+	case TTM_PL_VRAM:
+		args->domain = RADEON_GEM_DOMAIN_VRAM;
+		break;
+	case TTM_PL_TT:
+		args->domain = RADEON_GEM_DOMAIN_GTT;
+		break;
+	case TTM_PL_SYSTEM:
+		args->domain = RADEON_GEM_DOMAIN_CPU;
+		break;
+	default:
+		break;
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_wait_idle *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_wait(robj, NULL, false);
+	/* call back into hw-specific functions, if any */
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r == -ERESTARTSYS)
+		r = -EINTR;
+	r = radeon_gem_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_set_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r = 0;
+
+	DRM_DEBUG("%d\n", args->handle);
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_get_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *rbo;
+	int r = 0;
+
+	DRM_DEBUG("\n");
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		goto out;
+	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+	radeon_bo_unreserve(rbo);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
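+/*
+ * GPU virtual address management for per-file VMs (Cayman and newer):
+ * userspace maps or unmaps a BO at a chosen offset.  Failures are
+ * reported both via the return code and args->operation =
+ * RADEON_VA_RESULT_ERROR, so userspace can always inspect the result.
+ */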
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_va *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_fpriv *fpriv = filp->driver_priv;
+	struct radeon_bo *rbo;
+	struct radeon_bo_va *bo_va;
+	u32 invalid_flags;
+	int r = 0;
+
+	if (!rdev->vm_manager.enabled) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOTTY;
+	}
+
+	/* !! DON'T REMOVE !!
+	 * We don't support vm_id yet; to be sure we don't have broken
+	 * userspace, reject anyone trying to use a non-zero value.  Moving
+	 * forward, we can then use those fields without breaking existing
+	 * userspace.
+	 */
+	if (args->vm_id) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	if (args->offset < RADEON_VA_RESERVED_SIZE) {
+		dev_err(dev->dev,
+			"offset 0x%lX is in reserved area 0x%X\n",
+			(unsigned long)args->offset,
+			RADEON_VA_RESERVED_SIZE);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	/* don't remove: we need to force userspace to set the snooped flag,
+	 * otherwise we will end up with broken userspace and we won't be
+	 * able to enable this feature without adding a new interface
+	 */
+	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+	if ((args->flags & invalid_flags)) {
+		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+			args->flags, invalid_flags);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+		dev_err(dev->dev, "only snooped mappings are supported for now\n");
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+	case RADEON_VA_UNMAP:
+		break;
+	default:
+		dev_err(dev->dev, "unsupported operation %d\n",
+			args->operation);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOENT;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return r;
+	}
+	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+	if (!bo_va) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOENT;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+		if (bo_va->soffset) {
+			args->operation = RADEON_VA_RESULT_VA_EXIST;
+			args->offset = bo_va->soffset;
+			goto out;
+		}
+		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
+		break;
+	case RADEON_VA_UNMAP:
+		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
+		break;
+	default:
+		break;
+	}
+	args->operation = RADEON_VA_RESULT_OK;
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+	}
+out:
+	radeon_bo_unreserve(rbo);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
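+/*
+ * Dumb-buffer allocation: pitch is the aligned width times the pixel
+ * size in bytes ((bpp + 1) / 8 rounds 15 bpp up to 2 bytes), e.g. an
+ * aligned width of 1024 at 32 bpp gives a 4096-byte pitch; the total is
+ * then rounded up to a whole page before a VRAM BO is created.
+ */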
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+	args->size = args->pitch * args->height;
+	args->size = roundup2(args->size, PAGE_SIZE);
+
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     false, ttm_bo_type_device,
+				     &gobj);
+	if (r)
+		return -ENOMEM;
+
+	r = drm_gem_handle_create(file_priv, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r) {
+		return r;
+	}
+	args->handle = handle;
+	return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_gem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_gem.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_gem.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_gem.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,18 @@
+/* $MidnightBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_gem.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef __RADEON_GEM_H__
+#define	__RADEON_GEM_H__
+
+#include <dev/drm2/drmP.h>
+
+int radeon_gem_object_init(struct drm_gem_object *obj);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+
+#endif /* !defined(__RADEON_GEM_H__) */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_gem.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_i2c.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_i2c.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_i2c.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1385 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_i2c.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include <dev/iicbus/iicbus.h>
+#include "radeon.h"
+#include "atom.h"
+#include "iicbus_if.h"
+#include "iicbb_if.h"
+
+/**
+ * radeon_ddc_probe - check a connector's DDC bus for a valid EDID
+ *
+ * Writes a zero offset, reads back the first 8 EDID bytes, and returns
+ * true only if the transfer succeeds and the EDID header looks valid.
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+{
+	u8 out = 0x0;
+	u8 buf[8];
+	int ret;
+	struct iic_msg msgs[] = {
+		{
+			.slave = DDC_ADDR << 1,
+			.flags = 0,
+			.len = 1,
+			.buf = &out,
+		},
+		{
+			.slave = DDC_ADDR << 1,
+			.flags = IIC_M_RD,
+			.len = 8,
+			.buf = buf,
+		}
+	};
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if (use_aux) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+		ret = iicbus_transfer(dig->dp_i2c_bus->adapter, msgs, 2);
+	} else {
+		ret = iicbus_transfer(radeon_connector->ddc_bus->adapter, msgs, 2);
+	}
+
+	if (ret != 0)
+		/* Couldn't find an accessible DDC on this connector */
+		return false;
+	/* Probe also for valid EDID header
+	 * EDID header starts with:
+	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+	 * Only the first 6 bytes must be valid as
+	 * drm_edid_block_valid() can fix the last 2 bytes */
+	if (drm_edid_header_is_valid(buf) < 6) {
+		/* Couldn't find an accessible EDID on this
+		 * connector */
+		return false;
+	}
+	return true;
+}
+
+/* bit banging i2c */
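+/*
+ * The callbacks below implement the iicbb(4) bit-banging interface on
+ * top of the GPIO registers described by radeon_i2c_bus_rec; the generic
+ * iicbb driver supplies the actual i2c protocol state machine.
+ */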
+
+static int radeon_iicbb_pre_xfer(device_t dev)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* RV410 appears to have a bug where the hw i2c in reset
+	 * holds the i2c port in a bad state - switch hw i2c away before
+	 * doing DDC - do this for all r200s/r300s/r400s for safety's sake
+	 */
+	if (rec->hw_capable) {
+		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+			u32 reg;
+
+			if (rdev->family >= CHIP_RV350)
+				reg = RADEON_GPIO_MONID;
+			else if ((rdev->family == CHIP_R300) ||
+				 (rdev->family == CHIP_R350))
+				reg = RADEON_GPIO_DVI_DDC;
+			else
+				reg = RADEON_GPIO_CRT2_DDC;
+
+			sx_xlock(&rdev->dc_hw_i2c_mutex);
+			if (rec->a_clk_reg == reg) {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+			} else {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+			}
+			sx_xunlock(&rdev->dc_hw_i2c_mutex);
+		}
+	}
+
+	/* switch the pads to ddc mode */
+	if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+		temp = RREG32(rec->mask_clk_reg);
+		temp &= ~(1 << 16);
+		WREG32(rec->mask_clk_reg, temp);
+	}
+
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
+
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	return 0;
+}
+
+static void radeon_iicbb_post_xfer(device_t dev)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* unmask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+}
+
+static int radeon_iicbb_get_clock(device_t dev)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
+
+	return (val != 0);
+}
+
+
+static int radeon_iicbb_get_data(device_t dev)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
+	return (val != 0);
+}
+
+static void radeon_iicbb_set_clock(device_t dev, int clock)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
+}
+
+static void radeon_iicbb_set_data(device_t dev, int data)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
+}
+
+static int
+radeon_iicbb_probe(device_t dev)
+{
+
+	return (BUS_PROBE_DEFAULT);
+}
+
+static int
+radeon_iicbb_attach(device_t dev)
+{
+	struct radeon_i2c_chan *i2c;
+	device_t iic_dev;
+
+	i2c = device_get_softc(dev);
+	device_set_desc(dev, i2c->name);
+
+	/* add generic bit-banging code */
+	iic_dev = device_add_child(dev, "iicbb", -1);
+	if (iic_dev == NULL)
+		return (ENXIO);
+	device_quiet(iic_dev);
+
+	/* attach and probe added child */
+	bus_generic_attach(dev);
+
+	return (0);
+}
+
+static int
+radeon_iicbb_detach(device_t dev)
+{
+
+	/* detach bit-banging code. */
+	bus_generic_detach(dev);
+
+	/* delete bit-banging code. */
+	device_delete_children(dev);
+	return (0);
+}
+
+static int
+radeon_iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+
+	/* Not sure what to do here. */
+	return 0;
+}
+
+static device_method_t radeon_iicbb_methods[] =	{
+	DEVMETHOD(device_probe,		radeon_iicbb_probe),
+	DEVMETHOD(device_attach,	radeon_iicbb_attach),
+	DEVMETHOD(device_detach,	radeon_iicbb_detach),
+
+	DEVMETHOD(bus_add_child,	bus_generic_add_child),
+	DEVMETHOD(bus_print_child,	bus_generic_print_child),
+
+	DEVMETHOD(iicbb_reset,		radeon_iicbb_reset),
+	DEVMETHOD(iicbb_pre_xfer,	radeon_iicbb_pre_xfer),
+	DEVMETHOD(iicbb_post_xfer,	radeon_iicbb_post_xfer),
+	DEVMETHOD(iicbb_setsda,		radeon_iicbb_set_data),
+	DEVMETHOD(iicbb_setscl,		radeon_iicbb_set_clock),
+	DEVMETHOD(iicbb_getsda,		radeon_iicbb_get_data),
+	DEVMETHOD(iicbb_getscl,		radeon_iicbb_get_clock),
+	DEVMETHOD_END
+};
+
+static driver_t radeon_iicbb_driver = {
+	"radeon_iicbb",
+	radeon_iicbb_methods,
+	0 /* softc will be allocated by parent */
+};
+static devclass_t radeon_iicbb_devclass;
+DRIVER_MODULE_ORDERED(radeon_iicbb, drmn, radeon_iicbb_driver,
+    radeon_iicbb_devclass, 0, 0, SI_ORDER_FIRST);
+DRIVER_MODULE(iicbb, radeon_iicbb, iicbb_driver, iicbb_devclass, 0, 0);
+
+/* hw i2c */
+
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+	u32 sclk = rdev->pm.current_sclk;
+	u32 prescale = 0;
+	u32 nm;
+	u8 n, m, loop;
+	int i2c_clock;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+		i2c_clock = 60;
+		nm = (sclk * 10) / (i2c_clock * 4);
+		for (loop = 1; loop < 255; loop++) {
+			if ((nm / loop) < loop)
+				break;
+		}
+		n = loop - 1;
+		m = loop - 2;
+		prescale = m | (n << 8);
+		break;
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* todo */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		i2c_clock = 50;
+		if (rdev->family == CHIP_R520)
+			prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+		else
+			prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* todo */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* todo */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* todo */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		break;
+	}
+	return prescale;
+}
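+/*
+ * Note on the r1xx-r3xx branch above: nm is the overall divider needed
+ * to derive the i2c clock from sclk, and the loop stops at the smallest
+ * loop with loop^2 > nm, splitting the divider into two roughly equal
+ * factors n and m (e.g. nm = 100 stops at loop = 11, so n = 10, m = 9).
+ */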
+
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct radeon_i2c_chan *i2c,
+			    struct iic_msg *msgs, int num)
+{
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct iic_msg *p;
+	int i, j, k, ret = 0;
+	u32 prescale;
+	u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+	u32 tmp, reg;
+
+	sx_xlock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	sx_xlock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+	       RADEON_I2C_DRIVE_EN |
+	       RADEON_I2C_START |
+	       RADEON_I2C_STOP |
+	       RADEON_I2C_GO);
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	}
+
+	if (rec->mm_i2c) {
+		i2c_cntl_0 = RADEON_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_I2C_CNTL_1;
+		i2c_data = RADEON_I2C_DATA;
+	} else {
+		i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+		i2c_data = RADEON_DVI_I2C_DATA;
+
+		switch (rdev->family) {
+		case CHIP_R100:
+		case CHIP_RV100:
+		case CHIP_RS100:
+		case CHIP_RV200:
+		case CHIP_RS200:
+		case CHIP_RS300:
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				/* no gpio select bit */
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R200:
+			/* only bit 4 on r200 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV250:
+		case CHIP_RV280:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_CRT2_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R300:
+		case CHIP_R350:
+			/* only bit 4 on r300/r350 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV350:
+		case CHIP_RV380:
+		case CHIP_R420:
+		case CHIP_R423:
+		case CHIP_RV410:
+		case CHIP_RS400:
+		case CHIP_RS480:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		default:
+			DRM_ERROR("unsupported asic\n");
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* check for a bus probe: a single zero-length message just tests
+	 * whether the slave ACKs its address, with no data transferred */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+				    RADEON_I2C_NACK |
+				    RADEON_I2C_HALT |
+				    RADEON_I2C_SOFT_RST));
+		WREG32(i2c_data, (p->slave << 1) & 0xff);
+		WREG32(i2c_data, 0);
+		WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+				    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+				    RADEON_I2C_EN |
+				    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+		WREG32(i2c_cntl_0, reg);
+		for (k = 0; k < 32; k++) {
+			udelay(10);
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_GO)
+				continue;
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		for (j = 0; j < p->len; j++) {
+			if (p->flags & IIC_M_RD) {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, ((p->slave << 1) & 0xff) | 0x1);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				p->buf[j] = RREG32(i2c_data) & 0xff;
+			} else {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, (p->slave << 1) & 0xff);
+				WREG32(i2c_data, p->buf[j]);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+			}
+		}
+	}
+
+done:
+	WREG32(i2c_cntl_0, 0);
+	WREG32(i2c_cntl_1, 0);
+	WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+			    RADEON_I2C_NACK |
+			    RADEON_I2C_HALT |
+			    RADEON_I2C_SOFT_RST));
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+	}
+
+	sx_xunlock(&rdev->pm.mutex);
+	sx_xunlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
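+/*
+ * Here the 15-byte buffering is actually used: each message is processed
+ * in chunks of at most 15 bytes (current_count), filling or draining the
+ * data FIFO around every GO/DONE cycle.
+ */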
+static int r500_hw_i2c_xfer(struct radeon_i2c_chan *i2c,
+			    struct iic_msg *msgs, int num)
+{
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct iic_msg *p;
+	int i, j, remaining, current_count, buffer_offset, ret = 0;
+	u32 prescale;
+	u32 tmp, reg;
+	u32 saved1, saved2;
+
+	sx_xlock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	sx_xlock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	/* clear gpio mask bits */
+	tmp = RREG32(rec->mask_clk_reg);
+	tmp &= ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, tmp);
+	tmp = RREG32(rec->mask_clk_reg);
+
+	tmp = RREG32(rec->mask_data_reg);
+	tmp &= ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, tmp);
+	tmp = RREG32(rec->mask_data_reg);
+
+	/* clear pin values */
+	tmp = RREG32(rec->a_clk_reg);
+	tmp &= ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, tmp);
+	tmp = RREG32(rec->a_clk_reg);
+
+	tmp = RREG32(rec->a_data_reg);
+	tmp &= ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, tmp);
+	tmp = RREG32(rec->a_data_reg);
+
+	/* set the pins to input */
+	tmp = RREG32(rec->en_clk_reg);
+	tmp &= ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, tmp);
+	tmp = RREG32(rec->en_clk_reg);
+
+	tmp = RREG32(rec->en_data_reg);
+	tmp &= ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, tmp);
+	tmp = RREG32(rec->en_data_reg);
+
+	/* flag the hw i2c engine busy in the scratch reg and save the
+	 * state we are about to clobber */
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+	saved2 = RREG32(0x494);
+	WREG32(0x494, saved2 | 0x1);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+	for (i = 0; i < 50; i++) {
+		udelay(1);
+		if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+			break;
+	}
+	if (i == 50) {
+		DRM_ERROR("failed to get i2c bus\n");
+		ret = -EBUSY;
+		goto done;
+	}
+
+	reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+	switch (rec->mask_clk_reg) {
+	case AVIVO_DC_GPIO_DDC1_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+		break;
+	case AVIVO_DC_GPIO_DDC2_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+		break;
+	case AVIVO_DC_GPIO_DDC3_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+		break;
+	default:
+		DRM_ERROR("gpio not supported with hw i2c\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* check for a bus probe (single zero-length message, as above) */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+					      AVIVO_DC_I2C_NACK |
+					      AVIVO_DC_I2C_HALT));
+		WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+		udelay(1);
+		WREG32(AVIVO_DC_I2C_RESET, 0);
+
+		WREG32(AVIVO_DC_I2C_DATA, (p->slave << 1) & 0xff);
+		WREG32(AVIVO_DC_I2C_DATA, 0);
+
+		WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+		WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+					       AVIVO_DC_I2C_DATA_COUNT(1) |
+					       (prescale << 16)));
+		WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+		WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+		for (j = 0; j < 200; j++) {
+			udelay(50);
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_GO)
+				continue;
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		if (p->flags & IIC_M_RD) {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, ((p->slave << 1) & 0xff) | 0x1);
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				for (j = 0; j < current_count; j++)
+					p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		} else {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, (p->slave << 1) & 0xff);
+				for (j = 0; j < current_count; j++)
+					WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		}
+	}
+
+done:
+	WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+				      AVIVO_DC_I2C_NACK |
+				      AVIVO_DC_I2C_HALT));
+	WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+	udelay(1);
+	WREG32(AVIVO_DC_I2C_RESET, 0);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+	WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+	WREG32(0x494, saved2);
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+	sx_xunlock(&rdev->pm.mutex);
+	sx_xunlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+static int radeon_hw_i2c_xfer(device_t dev,
+			      struct iic_msg *msgs, uint32_t num)
+{
+	struct radeon_i2c_chan *i2c = device_get_softc(dev);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	int ret = 0;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		ret = r100_hw_i2c_xfer(i2c, msgs, num);
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		if (rec->mm_i2c)
+			ret = r100_hw_i2c_xfer(i2c, msgs, num);
+		else
+			ret = r500_hw_i2c_xfer(i2c, msgs, num);
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* XXX fill in hw i2c implementation */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		ret = -EIO;
+		break;
+	}
+
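+	/* the helpers above return Linux-style negative errno values, while
+	 * iicbus(4) expects positive error codes, hence the sign flip */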
+	return -ret;
+}
+
+static int
+radeon_hw_i2c_probe(device_t dev)
+{
+
+	return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+radeon_hw_i2c_attach(device_t dev)
+{
+	struct radeon_i2c_chan *i2c;
+	device_t iic_dev;
+
+	i2c = device_get_softc(dev);
+	device_set_desc(dev, i2c->name);
+
+	/* add the generic i2c bus */
+	iic_dev = device_add_child(dev, "iicbus", -1);
+	if (iic_dev == NULL)
+		return (ENXIO);
+	device_quiet(iic_dev);
+
+	/* attach and probe added child */
+	bus_generic_attach(dev);
+
+	return (0);
+}
+
+static int
+radeon_hw_i2c_detach(device_t dev)
+{
+
+	/* detach the generic i2c bus. */
+	bus_generic_detach(dev);
+
+	/* delete the generic i2c bus. */
+	device_delete_children(dev);
+	return (0);
+}
+
+static int
+radeon_hw_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+
+	/* Not sure what to do here. */
+	return 0;
+}
+
+
+static device_method_t radeon_hw_i2c_methods[] = {
+	DEVMETHOD(device_probe,		radeon_hw_i2c_probe),
+	DEVMETHOD(device_attach,	radeon_hw_i2c_attach),
+	DEVMETHOD(device_detach,	radeon_hw_i2c_detach),
+	DEVMETHOD(iicbus_reset,		radeon_hw_i2c_reset),
+	DEVMETHOD(iicbus_transfer,	radeon_hw_i2c_xfer),
+	DEVMETHOD_END
+};
+
+static driver_t radeon_hw_i2c_driver = {
+	"radeon_hw_i2c",
+	radeon_hw_i2c_methods,
+	0 /* softc will be allocated by parent */
+};
+
+static devclass_t radeon_hw_i2c_devclass;
+DRIVER_MODULE_ORDERED(radeon_hw_i2c, drm, radeon_hw_i2c_driver,
+    radeon_hw_i2c_devclass, 0, 0, SI_ORDER_FIRST);
+DRIVER_MODULE(iicbus, radeon_hw_i2c, iicbus_driver, iicbus_devclass, 0, 0);
+
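+/*
+ * radeon_i2c_create() builds one of three newbus stacks under the drm
+ * device: a hw i2c bridge with an iicbus child, an atom-based hw bridge
+ * on DCE3+ parts, or the bit-banging radeon_iicbb bridge whose iicbb
+ * child in turn exposes the iicbus that i2c->adapter points at.
+ */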
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+					  struct radeon_i2c_bus_rec *rec,
+					  const char *name)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_chan *i2c;
+	device_t iicbus_dev;
+	int ret;
+
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
+	i2c = malloc(sizeof(struct radeon_i2c_chan),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (i2c == NULL)
+		return NULL;
+
+	/*
+	 * Grab Giant before messing with newbus devices, just in case
+	 * we do not hold it already.
+	 */
+	mtx_lock(&Giant);
+
+	i2c->rec = *rec;
+	i2c->dev = dev;
+	if (rec->mm_i2c ||
+	    (rec->hw_capable &&
+	     radeon_hw_i2c &&
+	     ((rdev->family <= CHIP_RS480) ||
+	      ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+		/* set the radeon hw i2c adapter */
+		snprintf(i2c->name, sizeof(i2c->name),
+			 "Radeon i2c hw bus %s", name);
+		iicbus_dev = device_add_child(dev->dev, "radeon_hw_i2c", -1);
+		if (iicbus_dev == NULL) {
+			DRM_ERROR("Failed to create bridge for hw i2c %s\n",
+			    name);
+			goto out_free;
+		}
+		device_quiet(iicbus_dev);
+		device_set_softc(iicbus_dev, i2c);
+
+		ret = device_probe_and_attach(iicbus_dev);
+		if (ret != 0) {
+			DRM_ERROR("Attach failed for bridge for hw i2c %s\n",
+			    name);
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+
+		i2c->adapter = device_find_child(iicbus_dev, "iicbus", -1);
+		if (i2c->adapter == NULL) {
+			DRM_ERROR("hw i2c bridge doesn't have iicbus child\n");
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+	} else if (rec->hw_capable &&
+		   radeon_hw_i2c &&
+		   ASIC_IS_DCE3(rdev)) {
+		/* hw i2c using atom */
+		snprintf(i2c->name, sizeof(i2c->name),
+			 "Radeon i2c hw bus %s", name);
+		iicbus_dev = device_add_child(dev->dev, "radeon_atom_hw_i2c", -1);
+		if (iicbus_dev == NULL) {
+			DRM_ERROR("Failed to create bridge for hw i2c %s\n",
+			    name);
+			goto out_free;
+		}
+		device_quiet(iicbus_dev);
+		device_set_softc(iicbus_dev, i2c);
+
+		ret = device_probe_and_attach(iicbus_dev);
+		if (ret != 0) {
+			DRM_ERROR("Attach failed for bridge for hw i2c %s\n",
+			    name);
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+
+		i2c->adapter = device_find_child(iicbus_dev, "iicbus", -1);
+		if (i2c->adapter == NULL) {
+			DRM_ERROR("hw i2c bridge doesn't have iicbus child\n");
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+	} else {
+		device_t iicbb_dev;
+
+		/* set the radeon bit adapter */
+		snprintf(i2c->name, sizeof(i2c->name),
+			 "Radeon i2c bit bus %s", name);
+		iicbus_dev = device_add_child(dev->dev, "radeon_iicbb", -1);
+		if (iicbus_dev == NULL) {
+			DRM_ERROR("Failed to create bridge for bb i2c %s\n",
+			    name);
+			goto out_free;
+		}
+		device_quiet(iicbus_dev);
+		device_set_softc(iicbus_dev, i2c);
+
+		ret = device_probe_and_attach(iicbus_dev);
+		if (ret != 0) {
+			DRM_ERROR("Attach failed for bridge for bb i2c %s\n",
+			    name);
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+
+		iicbb_dev = device_find_child(iicbus_dev, "iicbb", -1);
+		if (iicbb_dev == NULL) {
+			DRM_ERROR("bb i2c bridge doesn't have iicbb child\n");
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+
+		i2c->adapter = device_find_child(iicbb_dev, "iicbus", -1);
+		if (i2c->adapter == NULL) {
+			DRM_ERROR(
+			    "bbbus bridge doesn't have iicbus grandchild\n");
+			device_delete_child(dev->dev, iicbus_dev);
+			goto out_free;
+		}
+	}
+
+	i2c->iic_bus = iicbus_dev;
+
+	mtx_unlock(&Giant);
+
+	return i2c;
+out_free:
+	mtx_unlock(&Giant);
+	free(i2c, DRM_MEM_DRIVER);
+	return NULL;
+
+}
+
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+					     struct radeon_i2c_bus_rec *rec,
+					     const char *name)
+{
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	i2c = malloc(sizeof(struct radeon_i2c_chan),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->dev = dev;
+	snprintf(i2c->name, sizeof(i2c->name),
+		 "Radeon aux bus %s", name);
+	ret = iic_dp_aux_add_bus(dev->dev, i2c->name,
+	    radeon_dp_i2c_aux_ch, i2c, &i2c->iic_bus,
+	    &i2c->adapter);
+	if (ret) {
+		DRM_INFO("Failed to register i2c %s\n", name);
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	free(i2c, DRM_MEM_DRIVER);
+	return NULL;
+
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	if (i2c->iic_bus != NULL) {
+		int ret;
+
+		mtx_lock(&Giant);
+		ret = device_delete_child(i2c->dev->dev, i2c->iic_bus);
+		mtx_unlock(&Giant);
+		KASSERT(ret == 0, ("unable to detach iic bus %s: %d",
+		    i2c->name, ret));
+	}
+	free(i2c, DRM_MEM_DRIVER);
+}
+
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+	if (rdev->is_atom_bios)
+		radeon_atombios_i2c_init(rdev);
+	else
+		radeon_combios_i2c_init(rdev);
+}
+
+/* remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i]) {
+			radeon_i2c_destroy(rdev->i2c_bus[i]);
+			rdev->i2c_bus[i] = NULL;
+		}
+	}
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+		    struct radeon_i2c_bus_rec *rec,
+		    const char *name)
+{
+	struct drm_device *dev = rdev->ddev;
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (!rdev->i2c_bus[i]) {
+			rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+			return;
+		}
+	}
+}
+
+/* looks up bus based on id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+					  struct radeon_i2c_bus_rec *i2c_bus)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i] &&
+		    (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+			return rdev->i2c_bus[i];
+		}
+	}
+	return NULL;
+}
+
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+	return NULL;
+}
+
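+/*
+ * Register-style i2c accessors: a one-byte write selects the register
+ * address, then a one-byte read (or a combined two-byte write) accesses
+ * it - the classic SMBus read/write-byte sequence built from raw
+ * messages.
+ */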
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct iic_msg msgs[] = {
+		{
+			.slave = slave_addr << 1,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.slave = slave_addr << 1,
+			.flags = IIC_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (iicbus_transfer(i2c_bus->adapter, msgs, 2) == 0) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
+			  addr, *val);
+	}
+}
+
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 val)
+{
+	uint8_t out_buf[2];
+	struct iic_msg msg = {
+		.slave = slave_addr << 1,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (iicbus_transfer(i2c_bus->adapter, &msg, 1) != 0)
+		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
+
+/* ddc router switching */
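+/* (on boards with a DDC mux, registers 0x3 and 0x1 of the mux chip look
+ *  like the configuration and output registers of a PCA95xx-style GPIO
+ *  expander: clearing bits in 0x3 makes the control pins outputs, and
+ *  0x1 drives them to select the port) */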
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.ddc_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_i2c.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_ioc32.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_ioc32.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_ioc32.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,332 @@
+/* $MidnightBSD$ */
+/**
+ * \file radeon_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Radeon DRM.
+ *
+ * \author Paul Mackerras <paulus at samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_ioc32.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include "opt_compat.h"
+
+#ifdef COMPAT_FREEBSD32
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+
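+/*
+ * Each compat thunk below copies the packed 32-bit argument struct into
+ * its native counterpart, widening the u32 fields that carry userspace
+ * pointers back to full-width pointers, then calls the normal handler.
+ */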
+typedef struct drm_radeon_init32 {
+	int func;
+	u32 sarea_priv_offset;
+	int is_pci;
+	int cp_mode;
+	int gart_size;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	u32 fb_offset;
+	u32 mmio_offset;
+	u32 ring_offset;
+	u32 ring_rptr_offset;
+	u32 buffers_offset;
+	u32 gart_textures_offset;
+} drm_radeon_init32_t;
+
+static int compat_radeon_cp_init(struct drm_device *dev, void *arg,
+				 struct drm_file *file_priv)
+{
+	drm_radeon_init32_t *init32;
+	drm_radeon_init_t __user init;
+
+	init32 = arg;
+
+	init.func = init32->func;
+	init.sarea_priv_offset = (unsigned long)init32->sarea_priv_offset;
+	init.is_pci = init32->is_pci;
+	init.cp_mode = init32->cp_mode;
+	init.gart_size = init32->gart_size;
+	init.ring_size = init32->ring_size;
+	init.usec_timeout = init32->usec_timeout;
+	init.fb_bpp = init32->fb_bpp;
+	init.front_offset = init32->front_offset;
+	init.front_pitch = init32->front_pitch;
+	init.back_offset = init32->back_offset;
+	init.back_pitch = init32->back_pitch;
+	init.depth_bpp = init32->depth_bpp;
+	init.depth_offset = init32->depth_offset;
+	init.depth_pitch = init32->depth_pitch;
+	init.fb_offset = (unsigned long)init32->fb_offset;
+	init.mmio_offset = (unsigned long)init32->mmio_offset;
+	init.ring_offset = (unsigned long)init32->ring_offset;
+	init.ring_rptr_offset = (unsigned long)init32->ring_rptr_offset;
+	init.buffers_offset = (unsigned long)init32->buffers_offset;
+	init.gart_textures_offset = (unsigned long)init32->gart_textures_offset;
+
+	return radeon_cp_init(dev, &init, file_priv);
+}
+
+typedef struct drm_radeon_clear32 {
+	unsigned int flags;
+	unsigned int clear_color;
+	unsigned int clear_depth;
+	unsigned int color_mask;
+	unsigned int depth_mask;	/* misnamed field:  should be stencil */
+	u32 depth_boxes;
+} drm_radeon_clear32_t;
+
+static int compat_radeon_cp_clear(struct drm_device *dev, void *arg,
+				  struct drm_file *file_priv)
+{
+	drm_radeon_clear32_t *clr32;
+	drm_radeon_clear_t __user clr;
+
+	clr32 = arg;
+
+	clr.flags = clr32->flags;
+	clr.clear_color = clr32->clear_color;
+	clr.clear_depth = clr32->clear_depth;
+	clr.color_mask = clr32->color_mask;
+	clr.depth_mask = clr32->depth_mask;
+	clr.depth_boxes = (drm_radeon_clear_rect_t *)(unsigned long)clr32->depth_boxes;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_CLEAR].func(dev, &clr, file_priv);
+}
+
+typedef struct drm_radeon_stipple32 {
+	u32 mask;
+} drm_radeon_stipple32_t;
+
+static int compat_radeon_cp_stipple(struct drm_device *dev, void *arg,
+				    struct drm_file *file_priv)
+{
+	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
+	drm_radeon_stipple_t __user request;
+
+	request.mask = (unsigned int *)(unsigned long)argp->mask;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_STIPPLE].func(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_tex_image32 {
+	unsigned int x, y;	/* Blit coordinates */
+	unsigned int width, height;
+	u32 data;
+} drm_radeon_tex_image32_t;
+
+typedef struct drm_radeon_texture32 {
+	unsigned int offset;
+	int pitch;
+	int format;
+	int width;		/* Texture image coordinates */
+	int height;
+	u32 image;
+} drm_radeon_texture32_t;
+
+static int compat_radeon_cp_texture(struct drm_device *dev, void *arg,
+				    struct drm_file *file_priv)
+{
+	drm_radeon_texture32_t *req32;
+	drm_radeon_texture_t __user request;
+	drm_radeon_tex_image32_t *img32;
+	drm_radeon_tex_image_t __user image;
+
+	req32 = arg;
+	if (req32->image == 0)
+		return -EINVAL;
+	img32 = (drm_radeon_tex_image32_t *)(unsigned long)req32->image;
+
+	request.offset = req32->offset;
+	request.pitch = req32->pitch;
+	request.format = req32->format;
+	request.width = req32->width;
+	request.height = req32->height;
+	request.image = ℑ
+	image.x = img32->x;
+	image.y = img32->y;
+	image.width = img32->width;
+	image.height = img32->height;
+	image.data = (void *)(unsigned long)img32->data;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_TEXTURE].func(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_vertex2_32 {
+	int idx;		/* Index of vertex buffer */
+	int discard;		/* Client finished with buffer? */
+	int nr_states;
+	u32 state;
+	int nr_prims;
+	u32 prim;
+} drm_radeon_vertex2_32_t;
+
+static int compat_radeon_cp_vertex2(struct drm_device *dev, void *arg,
+				    struct drm_file *file_priv)
+{
+	drm_radeon_vertex2_32_t *req32;
+	drm_radeon_vertex2_t __user request;
+
+	req32 = arg;
+
+	request.idx = req32->idx;
+	request.discard = req32->discard;
+	request.nr_states = req32->nr_states;
+	request.state = (drm_radeon_state_t *)(unsigned long)req32->state;
+	request.nr_prims = req32->nr_prims;
+	request.prim = (drm_radeon_prim_t *)(unsigned long)req32->prim;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_VERTEX2].func(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_cmd_buffer32 {
+	int bufsz;
+	u32 buf;
+	int nbox;
+	u32 boxes;
+} drm_radeon_cmd_buffer32_t;
+
+static int compat_radeon_cp_cmdbuf(struct drm_device *dev, void *arg,
+				   struct drm_file *file_priv)
+{
+	drm_radeon_cmd_buffer32_t *req32;
+	drm_radeon_cmd_buffer_t __user request;
+
+	req32 = arg;
+
+	request.bufsz = req32->bufsz;
+	request.buf = (char *)(unsigned long)req32->buf;
+	request.nbox = req32->nbox;
+	request.boxes = (struct drm_clip_rect *)(unsigned long)req32->boxes;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_CMDBUF].func(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_getparam32 {
+	int param;
+	u32 value;
+} drm_radeon_getparam32_t;
+
+static int compat_radeon_cp_getparam(struct drm_device *dev, void *arg,
+				     struct drm_file *file_priv)
+{
+	drm_radeon_getparam32_t *req32;
+	drm_radeon_getparam_t __user request;
+
+	req32 = arg;
+
+	request.param = req32->param;
+	request.value = (void *)(unsigned long)req32->value;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_GETPARAM].func(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or GART */
+} drm_radeon_mem_alloc32_t;
+
+static int compat_radeon_mem_alloc(struct drm_device *dev, void *arg,
+				   struct drm_file *file_priv)
+{
+	drm_radeon_mem_alloc32_t *req32;
+	drm_radeon_mem_alloc_t __user request;
+
+	req32 = arg;
+
+	request.region = req32->region;
+	request.alignment = req32->alignment;
+	request.size = req32->size;
+	request.region_offset = (int *)(unsigned long)req32->region_offset;
+
+	return radeon_mem_alloc(dev, &request, file_priv);
+}
+
+typedef struct drm_radeon_irq_emit32 {
+	u32 irq_seq;
+} drm_radeon_irq_emit32_t;
+
+static int compat_radeon_irq_emit(struct drm_device *dev, void *arg,
+				  struct drm_file *file_priv)
+{
+	drm_radeon_irq_emit32_t *req32;
+	drm_radeon_irq_emit_t __user request;
+
+	req32 = arg;
+
+	request.irq_seq = (int *)(unsigned long)req32->irq_seq;
+
+	return radeon_irq_emit(dev, &request, file_priv);
+}
+
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
+typedef struct drm_radeon_setparam32 {
+	int param;
+	u64 value;
+} __attribute__((packed)) drm_radeon_setparam32_t;
+
+static int compat_radeon_cp_setparam(struct drm_device *dev, void *arg,
+				     struct drm_file *file_priv)
+{
+	drm_radeon_setparam32_t *req32;
+	drm_radeon_setparam_t __user request;
+
+	req32 = arg;
+
+	request.param = req32->param;
+	request.value = req32->value;
+
+	return radeon_ioctls[DRM_IOCTL_RADEON_SETPARAM].func(dev, &request, file_priv);
+}
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
+
+struct drm_ioctl_desc radeon_compat_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, compat_radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, compat_radeon_cp_clear, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, compat_radeon_cp_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, compat_radeon_cp_texture, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, compat_radeon_cp_vertex2, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, compat_radeon_cp_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, compat_radeon_cp_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, compat_radeon_cp_setparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, compat_radeon_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, compat_radeon_irq_emit, DRM_AUTH)
+};
+int radeon_num_compat_ioctls = ARRAY_SIZE(radeon_compat_ioctls);
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_ioc32.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_irq.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_irq.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_irq.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,404 @@
+/* $MidnightBSD$ */
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith at tungstengraphics.com>
+ *    Michel Dänzer <michel at daenzer.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_irq.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+
+void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->irq_enable_reg |= mask;
+	else
+		dev_priv->irq_enable_reg &= ~mask;
+
+	if (dev->irq_enabled)
+		RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->r500_disp_irq_reg |= mask;
+	else
+		dev_priv->r500_disp_irq_reg &= ~mask;
+
+	if (dev->irq_enabled)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	}
+}
+
+static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+{
+	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
+	u32 irq_mask = RADEON_SW_INT_TEST;
+
+	*r500_disp_int = 0;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		/* vbl interrupts in a different place */
+
+		if (irqs & R500_DISPLAY_INT_STATUS) {
+			/* if a display interrupt */
+			u32 disp_irq;
+
+			disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
+
+			*r500_disp_int = disp_irq;
+			if (disp_irq & R500_D1_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+			if (disp_irq & R500_D2_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+		}
+		irq_mask |= R500_DISPLAY_INT_STATUS;
+	} else
+		irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
+
+	irqs &=	irq_mask;
+
+	if (irqs)
+		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
+	return irqs;
+}
+
+/* Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ *    - Drop marker IRQs into the command stream ahead of time.
+ *    - Wait on IRQs with the lock *not held*.
+ *    - Check each for its termination condition.
+ *
+ * - Internally in cp_getbuffer, etc:
+ *    - as above, but wait with the lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the IRQs aren't
+ * tied to DMA at all; this is just a hangover from DRI prehistory.
+ */
+
+irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	u32 stat;
+	u32 r500_disp_int;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return IRQ_NONE;
+
+	/* Only consider the bits we're interested in - others could be used
+	 * outside the DRM
+	 */
+	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
+	if (!stat)
+		return IRQ_NONE;
+
+	stat &= dev_priv->irq_enable_reg;
+
+	/* SW interrupt */
+	if (stat & RADEON_SW_INT_TEST)
+		DRM_WAKEUP(&dev_priv->swi_queue);
+
+	/* VBLANK interrupt */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 0);
+		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 1);
+	} else {
+		if (stat & RADEON_CRTC_VBLANK_STAT)
+			drm_handle_vblank(dev, 0);
+		if (stat & RADEON_CRTC2_VBLANK_STAT)
+			drm_handle_vblank(dev, 1);
+	}
+	return IRQ_HANDLED;
+}
+
+static int radeon_emit_irq(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int ret;
+	RING_LOCALS;
+
+	atomic_inc(&dev_priv->swi_emitted);
+	ret = atomic_read(&dev_priv->swi_emitted);
+
+	BEGIN_RING(4);
+	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+	OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return ret;
+}
+
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	int ret = 0;
+
+	if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
+		return 0;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+
+	return ret;
+}
+
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (crtc < 0 || crtc > 1) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		if (crtc == 0)
+			return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+		else
+			return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
+	} else {
+		if (crtc == 0)
+			return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+		else
+			return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
+	}
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_emit_t *emit = data;
+	int result;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return -EINVAL;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	result = radeon_emit_irq(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return -EINVAL;
+
+	return radeon_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* drm_dma.h hooks
+ */
+void radeon_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	u32 dummy;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return;
+
+	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+	/* Clear bits if they're already high */
+	radeon_acknowledge_irqs(dev_priv, &dummy);
+}
+
+int radeon_driver_irq_postinstall(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+
+	atomic_set(&dev_priv->swi_emitted, 0);
+	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+	dev->max_vblank_count = 0x001fffff;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return 0;
+
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	return 0;
+}
+
+void radeon_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	/* Disable *all* interrupts */
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+}
+
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+	return dev_priv->vblank_crtc;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+		return -EINVAL;
+	}
+	dev_priv->vblank_crtc = (unsigned int)value;
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_irq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
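
radeon_emit_irq() and radeon_wait_irq() above implement a simple software fence: the CPU hands out a monotonically increasing sequence number, queues a ring command that stores it to RADEON_LAST_SWI_REG when the GPU reaches it, and waiters sleep until the register catches up to their number. A user-space analog of that protocol, sketched with C11 atomics standing in for the scratch register and the emitted counter (the real driver sleeps on a wait queue instead of polling):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-ins for RADEON_LAST_SWI_REG and dev_priv->swi_emitted */
static atomic_uint last_swi_reg;   /* what the GPU has retired */
static atomic_uint swi_emitted;    /* what the CPU has handed out */

/* emit: hand out the next sequence number (the driver also queues a
 * ring command so the GPU stores it to the register on completion) */
static unsigned emit_irq(void)
{
	return atomic_fetch_add(&swi_emitted, 1) + 1;
}

/* the GPU side, modeled as the completion path advancing the register */
static void gpu_retire(unsigned seq)
{
	atomic_store(&last_swi_reg, seq);
}

/* wait: done once the retired counter has reached our number */
static bool irq_done(unsigned seq)
{
	return atomic_load(&last_swi_reg) >= seq;
}

int main(void)
{
	unsigned seq = emit_irq();
	printf("emitted %u, done=%d\n", seq, irq_done(seq));
	gpu_retire(seq);
	printf("after retire, done=%d\n", irq_done(seq));
	return 0;
}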
Added: trunk/sys/dev/drm2/radeon/radeon_irq_kms.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_irq_kms.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_irq_kms.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,484 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_irq_kms.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon_irq_kms.h"
+#include "radeon.h"
+#include "atom.h"
+
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @DRM_IRQ_ARGS: args
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_irq_process(rdev);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt.  It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
+static void radeon_hotplug_work_func(void *arg, int pending)
+{
+	struct radeon_device *rdev = arg;
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	if (mode_config->num_connector) {
+		list_for_each_entry(connector, &mode_config->connector_list, head)
+			radeon_connector_hotplug(connector);
+	}
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+	/* Clear bits */
+	radeon_irq_process(rdev);
+}
+
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles stuff to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
+	dev->max_vblank_count = 0x001fffff;
+	return 0;
+}
+
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	if (rdev == NULL) {
+		return;
+	}
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+	/* RV370/RV380 was the first asic with MSI support */
+	if (rdev->family < CHIP_RV380)
+		return false;
+
+	/* MSIs don't work on AGP */
+	if (rdev->flags & RADEON_IS_AGP)
+		return false;
+
+	/* force MSI on */
+	if (radeon_msi == 1)
+		return true;
+	else if (radeon_msi == 0)
+		return false;
+
+	/* Quirks */
+	/* HP RS690 only seems to work with MSIs. */
+	if ((rdev->ddev->pci_device == 0x791f) &&
+	    (rdev->ddev->pci_subvendor == 0x103c) &&
+	    (rdev->ddev->pci_subdevice == 0x30c2))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->ddev->pci_device == 0x791f) &&
+	    (rdev->ddev->pci_subvendor == 0x1028) &&
+	    (rdev->ddev->pci_subdevice == 0x01fc))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->ddev->pci_device == 0x791f) &&
+	    (rdev->ddev->pci_subvendor == 0x1028) &&
+	    (rdev->ddev->pci_subdevice == 0x01fd))
+		return true;
+
+	/* Gateway RS690 only seems to work with MSIs. */
+	if ((rdev->ddev->pci_device == 0x791f) &&
+	    (rdev->ddev->pci_subvendor == 0x107b) &&
+	    (rdev->ddev->pci_subdevice == 0x0185))
+		return true;
+
+	/* try and enable MSIs by default on all RS690s */
+	if (rdev->family == CHIP_RS690)
+		return true;
+
+	/* RV515 seems to have MSI issues where it occasionally loses
+	 * MSI rearms.  This leads to lockups and freezes.
+	 * Disable it by default.
+	 */
+	if (rdev->family == CHIP_RV515)
+		return false;
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* APUs work fine with MSIs */
+		if (rdev->family >= CHIP_PALM)
+			return true;
+		/* lots of IGPs have problems with MSIs */
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+	int r = 0;
+
+	TASK_INIT(&rdev->hotplug_work, 0, radeon_hotplug_work_func, rdev);
+	TASK_INIT(&rdev->audio_work, 0, r600_audio_update_hdmi, rdev);
+
+	DRM_SPININIT(&rdev->irq.lock, "drm__radeon_device__irq__lock");
+	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+	if (r) {
+		return r;
+	}
+	/* enable msi */
+	rdev->msi_enabled = 0;
+
+	if (radeon_msi_ok(rdev)) {
+		int ret = drm_pci_enable_msi(rdev->ddev);
+		if (!ret) {
+			rdev->msi_enabled = 1;
+			dev_info(rdev->dev, "radeon: using MSI.\n");
+		}
+	}
+	rdev->irq.installed = true;
+	r = drm_irq_install(rdev->ddev);
+	if (r) {
+		rdev->irq.installed = false;
+		return r;
+	}
+	DRM_INFO("radeon: irq initialized.\n");
+	return 0;
+}
+
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+	drm_vblank_cleanup(rdev->ddev);
+	if (rdev->irq.installed) {
+		drm_irq_uninstall(rdev->ddev);
+		rdev->irq.installed = false;
+		if (rdev->msi_enabled)
+			drm_pci_disable_msi(rdev->ddev);
+	}
+	taskqueue_drain(rdev->tq, &rdev->hotplug_work);
+}
+
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+		DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+		DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+		DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+		DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = true;
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = false;
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	DRM_SPINLOCK_IRQSAVE(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	DRM_SPINUNLOCK_IRQRESTORE(&rdev->irq.lock, irqflags);
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_irq_kms.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
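
The *_irq_get/_irq_put pairs above reference-count each interrupt source so that only the 0 -> 1 and 1 -> 0 transitions reprogram the hardware; intermediate gets and puts just adjust the counter. A compact user-space sketch of that pattern, with a printf standing in for the locked radeon_irq_set() call:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ring_int;	/* stand-in for rdev->irq.ring_int[ring] */

static void hw_update(void)
{
	/* stand-in for radeon_irq_set(rdev) under the irq spinlock */
	printf("reprogram interrupt mask (refcount=%d)\n",
	    atomic_load(&ring_int));
}

static void sw_irq_get(void)
{
	/* only the 0 -> 1 transition touches the hardware */
	if (atomic_fetch_add(&ring_int, 1) + 1 == 1)
		hw_update();
}

static void sw_irq_put(void)
{
	/* only the 1 -> 0 transition touches the hardware */
	if (atomic_fetch_sub(&ring_int, 1) - 1 == 0)
		hw_update();
}

int main(void)
{
	sw_irq_get();	/* reprograms */
	sw_irq_get();	/* no-op, already enabled */
	sw_irq_put();	/* no-op, still one user */
	sw_irq_put();	/* reprograms, last user gone */
	return 0;
}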
Added: trunk/sys/dev/drm2/radeon/radeon_irq_kms.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_irq_kms.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_irq_kms.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,14 @@
+/* $MidnightBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_irq_kms.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __RADEON_IRQ_KMS_H__
+#define	__RADEON_IRQ_KMS_H__
+
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+
+#endif /* !defined(__RADEON_IRQ_KMS_H__) */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_irq_kms.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
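
Both postinstall paths in this commit set dev->max_vblank_count to 0x001fffff, i.e. the hardware frame counter is only 21 bits wide, so consumers must compute frame deltas modulo that mask to survive wraparound. A sketch of the wrap-safe subtraction, assuming two raw 21-bit counter reads:

#include <stdint.h>
#include <stdio.h>

#define MAX_VBLANK_COUNT 0x001fffffu	/* 21-bit hardware frame counter */

/* elapsed frames between two raw counter reads, tolerating one wrap */
static uint32_t vblank_delta(uint32_t then, uint32_t now)
{
	return (now - then) & MAX_VBLANK_COUNT;
}

int main(void)
{
	/* counter wraps from 0x1fffff back to 0 between the two reads */
	printf("delta=%u\n", vblank_delta(0x1ffffeu, 0x000003u)); /* 5 */
	return 0;
}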
Added: trunk/sys/dev/drm2/radeon/radeon_kms.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_kms.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_kms.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,732 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_kms.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_asic.h"
+#include "radeon_kms.h"
+
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ * Returns 0 on success.
+ */
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return 0;
+	if (rdev->rmmio == NULL)
+		goto done_free;
+	radeon_acpi_fini(rdev);
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+
+done_free:
+	free(rdev, DRM_MEM_DRIVER);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+	struct radeon_device *rdev;
+	int r, acpi_status;
+
+	rdev = malloc(sizeof(struct radeon_device), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (rdev == NULL) {
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)rdev;
+
+	/* update BUS flag */
+	if (drm_pci_device_is_agp(dev)) {
+		DRM_INFO("RADEON_IS_AGP\n");
+		flags |= RADEON_IS_AGP;
+	} else if (drm_pci_device_is_pcie(dev)) {
+		DRM_INFO("RADEON_IS_PCIE\n");
+		flags |= RADEON_IS_PCIE;
+	} else {
+		DRM_INFO("RADEON_IS_PCI\n");
+		flags |= RADEON_IS_PCI;
+	}
+
+	/* radeon_device_init should report only fatal errors,
+	 * like memory allocation failure, iomapping failure,
+	 * or memory manager initialization failure; it must
+	 * properly initialize the GPU MC controller and permit
+	 * VRAM allocation.
+	 */
+	r = radeon_device_init(rdev, dev, flags);
+	if (r) {
+		dev_err(dev->dev, "Fatal error during GPU init\n");
+		goto out;
+	}
+
+	/* Again, modeset_init should fail only on a fatal error;
+	 * otherwise it should provide enough functionality
+	 * for shadowfb to run.
+	 */
+	r = radeon_modeset_init(rdev);
+	if (r)
+		dev_err(dev->dev, "Fatal error during modeset init\n");
+
+	/* Call ACPI methods: these require modeset init,
+	 * but failure is not fatal.
+	 */
+	if (!r) {
+		acpi_status = radeon_acpi_init(rdev);
+		if (acpi_status)
+			dev_dbg(dev->dev,
+				"Error during ACPI methods call\n");
+	}
+
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
+}
+
+/**
+ * radeon_set_filp_rights - Set filp rights.
+ *
+ * @dev: drm dev pointer
+ * @owner: drm file
+ * @applier: drm file
+ * @value: 1 to request the rights, 0 to revoke them; set on return to
+ *         1 if @applier now owns them, 0 otherwise
+ *
+ * Sets the filp rights for the device (all asics).
+ */
+static void radeon_set_filp_rights(struct drm_device *dev,
+				   struct drm_file **owner,
+				   struct drm_file *applier,
+				   uint32_t *value)
+{
+	DRM_LOCK(dev);
+	if (*value == 1) {
+		/* wants rights */
+		if (!*owner)
+			*owner = applier;
+	} else if (*value == 0) {
+		/* revokes rights */
+		if (*owner == applier)
+			*owner = NULL;
+	}
+	*value = *owner == applier ? 1 : 0;
+	DRM_UNLOCK(dev);
+}
+
+/*
+ * Userspace get information ioctl
+ */
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @dev: drm dev pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers.  Examples include: pci device id, pipeline parms, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_info *info = data;
+	struct radeon_mode_info *minfo = &rdev->mode_info;
+	uint32_t value, *value_ptr;
+	uint64_t value64, *value_ptr64;
+	struct drm_crtc *crtc;
+	int i, found;
+
+	/* TIMESTAMP is a 64-bit value, needs special handling. */
+	if (info->request == RADEON_INFO_TIMESTAMP) {
+		if (rdev->family >= CHIP_R600) {
+			value_ptr64 = (uint64_t*)((unsigned long)info->value);
+			if (rdev->family >= CHIP_TAHITI) {
+				value64 = si_get_gpu_clock(rdev);
+			} else {
+				value64 = r600_get_gpu_clock(rdev);
+			}
+
+			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+				return -EFAULT;
+			}
+			return 0;
+		} else {
+			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+			return -EINVAL;
+		}
+	}
+
+	value_ptr = (uint32_t *)((unsigned long)info->value);
+	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	switch (info->request) {
+	case RADEON_INFO_DEVICE_ID:
+		value = dev->pci_device;
+		break;
+	case RADEON_INFO_NUM_GB_PIPES:
+		value = rdev->num_gb_pipes;
+		break;
+	case RADEON_INFO_NUM_Z_PIPES:
+		value = rdev->num_z_pipes;
+		break;
+	case RADEON_INFO_ACCEL_WORKING:
+		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+			value = false;
+		else
+			value = rdev->accel_working;
+		break;
+	case RADEON_INFO_CRTC_FROM_ID:
+		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+			crtc = (struct drm_crtc *)minfo->crtcs[i];
+			if (crtc && crtc->base.id == value) {
+				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+				value = radeon_crtc->crtc_id;
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_ACCEL_WORKING2:
+		value = rdev->accel_working;
+		break;
+	case RADEON_INFO_TILING_CONFIG:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.tile_config;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.tile_config;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.tile_config;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.tile_config;
+		else {
+			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_WANT_HYPERZ:
+		/* The "value" here is both an input and output parameter.
+		 * If the input value is 1, filp requests hyper-z access.
+		 * If the input value is 0, filp revokes its hyper-z access.
+		 *
+		 * When returning, the value is 1 if filp owns hyper-z access,
+		 * 0 otherwise. */
+		if (value >= 2) {
+			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+		break;
+	case RADEON_INFO_WANT_CMASK:
+		/* The same logic as Hyper-Z. */
+		if (value >= 2) {
+			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
+		break;
+	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+		/* return clock value in KHz */
+		value = rdev->clock.spll.reference_freq * 10;
+		break;
+	case RADEON_INFO_NUM_BACKENDS:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_backends_per_se *
+				rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.max_backends_per_se *
+				rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.max_backends;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.max_backends;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.max_backends;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_NUM_TILE_PIPES:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_tile_pipes;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.max_tile_pipes;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.max_tile_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.max_tile_pipes;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.max_tile_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_FUSION_GART_WORKING:
+		value = 1;
+		break;
+	case RADEON_INFO_BACKEND_MAP:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.backend_map;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.backend_map;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.backend_map;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.backend_map;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.backend_map;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_VA_START:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		value = RADEON_VA_RESERVED_SIZE;
+		break;
+	case RADEON_INFO_IB_VM_MAX_SIZE:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		value = RADEON_IB_VM_MAX_SIZE;
+		break;
+	case RADEON_INFO_MAX_PIPES:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_cu_per_sh;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.max_pipes_per_simd;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.max_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.max_pipes;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.max_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_MAX_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.num_ses;
+		else
+			value = 1;
+		break;
+	case RADEON_INFO_MAX_SH_PER_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_sh_per_se;
+		else
+			return -EINVAL;
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+		return -EINVAL;
+	}
+	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+
+/*
+ * Outdated mess for old drm with Xorg being in charge (void function now).
+ */
+/**
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+	return 0;
+}
+
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+#ifdef FREEBSD_WIP
+	vga_switcheroo_process_delayed_switch();
+#endif /* FREEBSD_WIP */
+}
+
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	file_priv->driver_priv = NULL;
+
+	/* newer GPUs have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN) {
+		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		fpriv = malloc(sizeof(*fpriv), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+		if (unlikely(!fpriv)) {
+			return -ENOMEM;
+		}
+
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
+		if (r) {
+			radeon_vm_fini(rdev, &fpriv->vm);
+			free(fpriv, DRM_MEM_DRIVER);
+			return r;
+		}
+
+		file_priv->driver_priv = fpriv;
+	}
+	return 0;
+}
+
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down vm on cayman+ (all asics).
+ */
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* newer GPUs have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
+
+		radeon_vm_fini(rdev, &fpriv->vm);
+		free(fpriv, DRM_MEM_DRIVER);
+		file_priv->driver_priv = NULL;
+	}
+}
+
+/**
+ * radeon_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).
+ */
+void radeon_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	if (rdev->hyperz_filp == file_priv)
+		rdev->hyperz_filp = NULL;
+	if (rdev->cmask_filp == file_priv)
+		rdev->cmask_filp = NULL;
+}
+
+/*
+ * VBlank related functions.
+ */
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	return radeon_get_vblank_counter(rdev, crtc);
+}
+
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int r;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	mtx_lock(&rdev->irq.lock);
+	rdev->irq.crtc_vblank_int[crtc] = true;
+	r = radeon_irq_set(rdev);
+	mtx_unlock(&rdev->irq.lock);
+	return r;
+}
+
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return;
+	}
+
+	mtx_lock(&rdev->irq.lock);
+	rdev->irq.crtc_vblank_int[crtc] = false;
+	radeon_irq_set(rdev);
+	mtx_unlock(&rdev->irq.lock);
+}
+
+/**
+ * radeon_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position.  (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get associated drm_crtc: */
+	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags,
+						     drmcrtc);
+}
+
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	/* Not valid in KMS. */
+	return -EINVAL;
+}
+
+#define KMS_INVALID_IOCTL(name)						\
+static int								\
+name(struct drm_device *dev, void *data, struct drm_file *file_priv)	\
+{									\
+	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
+	return -EINVAL;							\
+}
+
+/*
+ * All these ioctls are invalid in kms world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+	/* KMS */
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+};
+int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);


Property changes on: trunk/sys/dev/drm2/radeon/radeon_kms.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
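
radeon_set_filp_rights() above arbitrates a single-owner privilege (hyper-z or cmask access) among DRM file handles: an input value of 1 requests the rights, 0 revokes them, and the same field is rewritten on return to say whether the applier now owns them. A standalone sketch of that grant/revoke logic, minus the DRM_LOCK/DRM_UNLOCK bracket, with a tiny stand-in type for struct drm_file:

#include <stdint.h>
#include <stdio.h>

/* stand-in for struct drm_file */
typedef struct filp { const char *name; } filp_t;

static void set_filp_rights(filp_t **owner, filp_t *applier, uint32_t *value)
{
	if (*value == 1) {
		/* wants rights: first applicant wins */
		if (*owner == NULL)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights: only the owner can give them up */
		if (*owner == applier)
			*owner = NULL;
	}
	/* report back whether the applier holds the rights now */
	*value = (*owner == applier) ? 1 : 0;
}

int main(void)
{
	filp_t a = { "a" }, b = { "b" };
	filp_t *owner = NULL;
	uint32_t v;

	v = 1; set_filp_rights(&owner, &a, &v); printf("a gets: %u\n", v);
	v = 1; set_filp_rights(&owner, &b, &v); printf("b gets: %u\n", v);
	v = 0; set_filp_rights(&owner, &a, &v); printf("a after revoke: %u\n", v);
	return 0;
}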
Added: trunk/sys/dev/drm2/radeon/radeon_kms.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_kms.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_kms.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,31 @@
+/* $MidnightBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_kms.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef __RADEON_KMS_H__
+#define	__RADEON_KMS_H__
+
+#include <dev/drm2/drmP.h>
+
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int radeon_driver_unload_kms(struct drm_device *dev);
+
+int radeon_driver_firstopen_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv);
+void radeon_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags);
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+
+#endif /* !defined(__RADEON_KMS_H__) */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_kms.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_legacy_crtc.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_legacy_crtc.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_legacy_crtc.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1086 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_legacy_crtc.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include <dev/drm2/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
+static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
+				       struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int xres = mode->hdisplay;
+	int yres = mode->vdisplay;
+	bool hscale = true, vscale = true;
+	int hsync_wid;
+	int vsync_wid;
+	int hsync_start;
+	int blank_width;
+	u32 scale, inc, crtc_more_cntl;
+	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
+	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
+	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
+	struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+		(RADEON_VERT_STRETCH_RESERVED |
+		 RADEON_VERT_AUTO_RATIO_INC);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+		(RADEON_HORZ_FP_LOOP_STRETCH |
+		 RADEON_HORZ_AUTO_RATIO_INC);
+
+	crtc_more_cntl = 0;
+	if ((rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		/* This works around an asic bug with RMX; some versions
+		   of the BIOS don't have this register initialized correctly. */
+		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+	}
+
+
+	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+			      | ((hsync_wid & 0x3f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				 ? RADEON_CRTC_H_SYNC_POL
+				 : 0));
+
+	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+			      | ((vsync_wid & 0x1f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				 ? RADEON_CRTC_V_SYNC_POL
+				 : 0));
+
+	fp_horz_vert_active = 0;
+
+	if (native_mode->hdisplay == 0 ||
+	    native_mode->vdisplay == 0) {
+		hscale = false;
+		vscale = false;
+	} else {
+		if (xres > native_mode->hdisplay)
+			xres = native_mode->hdisplay;
+		if (yres > native_mode->vdisplay)
+			yres = native_mode->vdisplay;
+
+		if (xres == native_mode->hdisplay)
+			hscale = false;
+		if (yres == native_mode->vdisplay)
+			vscale = false;
+	}
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_FULL:
+	case RMX_ASPECT:
+		if (!hscale)
+			fp_horz_stretch |= ((xres/8-1) << 16);
+		else {
+			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+				/ native_mode->hdisplay + 1;
+			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+					RADEON_HORZ_STRETCH_BLEND |
+					RADEON_HORZ_STRETCH_ENABLE |
+					((native_mode->hdisplay/8-1) << 16));
+		}
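+		/*
+		 * For example (hypothetical values): stretching xres = 800
+		 * across a 1024-wide native panel programs the ratio field
+		 * to roughly (800 * RADEON_HORZ_STRETCH_RATIO_MAX) / 1024,
+		 * i.e. about 800/1024 of full scale, while bits 16+ still
+		 * carry the native panel width in 8-pixel units minus one.
+		 */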
+
+		if (!vscale)
+			fp_vert_stretch |= ((yres-1) << 12);
+		else {
+			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+				/ native_mode->vdisplay + 1;
+			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+					RADEON_VERT_STRETCH_ENABLE |
+					RADEON_VERT_STRETCH_BLEND |
+					((native_mode->vdisplay-1) << 12));
+		}
+		break;
+	case RMX_CENTER:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+
+		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+				RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+		if (blank_width > 110)
+			blank_width = 110;
+
+		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+		if (!hsync_wid)
+			hsync_wid = 1;
+
+		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+					? RADEON_CRTC_H_SYNC_POL
+					: 0));
+
+		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+		if (!vsync_wid)
+			vsync_wid = 1;
+
+		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+					| ((vsync_wid & 0x1f) << 16)
+					| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+						? RADEON_CRTC_V_SYNC_POL
+						: 0)));
+
+		fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+				(((native_mode->hdisplay / 8) & 0x1ff) << 16));
+		break;
+	case RMX_OFF:
+	default:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+		break;
+	}
+
+	WREG32(RADEON_FP_HORZ_STRETCH,      fp_horz_stretch);
+	WREG32(RADEON_FP_VERT_STRETCH,      fp_vert_stretch);
+	WREG32(RADEON_CRTC_MORE_CNTL,       crtc_more_cntl);
+	WREG32(RADEON_FP_HORZ_VERT_ACTIVE,  fp_horz_vert_active);
+	WREG32(RADEON_FP_H_SYNC_STRT_WID,   fp_h_sync_strt_wid);
+	WREG32(RADEON_FP_V_SYNC_STRT_WID,   fp_v_sync_strt_wid);
+	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+}
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips will usually pass on the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+			   RADEON_PPLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
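+
+/*
+ * Note: WREG32_PLL_P(reg, val, mask) appears to be a read-modify-write
+ * helper, writing (old & mask) | (val & ~mask), so the call above sets
+ * the ATOMIC_UPDATE_W bit while leaving the rest of the register alone.
+ */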
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips will usually pass on the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			   RADEON_P2PLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+				       uint16_t fb_div)
+{
+	unsigned int vcoFreq;
+
+	if (!ref_div)
+		return 1;
+
+	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+	/*
+	 * This is horribly crude: the VCO frequency range is divided into
+	 * 3 parts, each part having a fixed PLL gain value.
+	 */
+	if (vcoFreq >= 30000)
+		/*
+		 * [300..max] MHz : 7
+		 */
+		return 7;
+	else if (vcoFreq >= 18000)
+		/*
+		 * [180..300) MHz : 4
+		 */
+		return 4;
+	else
+		/*
+		 * [0..180) MHz : 1
+		 */
+		return 1;
+}
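+
+/*
+ * For example (hypothetical values; the PLL code appears to work in 10 kHz
+ * units): ref_freq = 2700 (27 MHz), fb_div = 70 and ref_div = 6 give
+ * vcoFreq = 2700 * 70 / 6 = 31500 (315 MHz), which is >= 30000, so the
+ * gain returned is 7.
+ */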
+
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = 0;
+	uint32_t mask;
+
+	if (radeon_crtc->crtc_id)
+		mask = (RADEON_CRTC2_DISP_DIS |
+			RADEON_CRTC2_VSYNC_DIS |
+			RADEON_CRTC2_HSYNC_DIS |
+			RADEON_CRTC2_DISP_REQ_EN_B);
+	else
+		mask = (RADEON_CRTC_DISPLAY_DIS |
+			RADEON_CRTC_VSYNC_DIS |
+			RADEON_CRTC_HSYNC_DIS);
+
+	/*
+	 * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
+	 * Therefore it is set in the DAC DPMS function.
+	 * This is different for GPUs with a single CRTC but a primary and a
+	 * TV DAC: here it controls the single CRTC no matter where it is
+	 * routed. Therefore we set it here.
+	 */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+									 RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+		}
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+										    RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
+		}
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		break;
+	}
+}
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			 struct drm_framebuffer *old_fb)
+{
+	return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, enum mode_set_atomic state)
+{
+	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+			 struct drm_framebuffer *fb,
+			 int x, int y, int atomic)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t base;
+	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+	uint32_t crtc_pitch, pitch_pixels;
+	uint32_t tiling_flags;
+	int format;
+	uint32_t gen_cntl_reg, gen_cntl_val;
+	int r;
+
+	DRM_DEBUG_KMS("\n");
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	/* Pin framebuffer & get tiling information */
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
+				     &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		return -EINVAL;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
+	/* if scanout was in GTT this really wouldn't work */
+	/* crtc offset is from display base addr not FB location */
+	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
+
+	base -= radeon_crtc->legacy_display_base_addr;
+
+	crtc_offset_cntl = 0;
+
+	pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
+			((target_fb->bits_per_pixel * 8) - 1)) /
+		       (target_fb->bits_per_pixel * 8));
+	crtc_pitch |= crtc_pitch << 16;
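+	/*
+	 * For example (hypothetical 1024-wide 32 bpp framebuffer with
+	 * pitches[0] == 4096): pitch_pixels = 4096 / 4 = 1024 and
+	 * crtc_pitch = (1024 * 32 + 255) / 256 = 128, i.e. the pitch in
+	 * 8-pixel units rounded up, replicated into both halves of the
+	 * register by the shift above.
+	 */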
+
+	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					     R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+					      R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					      R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev)) {
+			crtc_tile_x0_y0 = x | (y << 16);
+			base &= ~0x7ff;
+		} else {
+			int byteshift = target_fb->bits_per_pixel >> 4;
+			int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
+			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+			crtc_offset_cntl |= (y % 16);
+		}
+	} else {
+		int offset = y * pitch_pixels + x;
+		switch (target_fb->bits_per_pixel) {
+		case 8:
+			offset *= 1;
+			break;
+		case 15:
+		case 16:
+			offset *= 2;
+			break;
+		case 24:
+			offset *= 3;
+			break;
+		case 32:
+			offset *= 4;
+			break;
+		default:
+			return false;
+		}
+		base += offset;
+	}
+
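+	/* CRTC_OFFSET appears to require 8-byte alignment, so drop the low three bits */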
+	base &= ~7;
+
+	if (radeon_crtc->crtc_id == 1)
+		gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
+	else
+		gen_cntl_reg = RADEON_CRTC_GEN_CNTL;
+
+	gen_cntl_val = RREG32(gen_cntl_reg);
+	gen_cntl_val &= ~(0xf << 8);
+	gen_cntl_val |= (format << 8);
+	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(gen_cntl_reg, gen_cntl_val);
+
+	crtc_offset = (u32)base;
+
+	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (radeon_crtc->crtc_id)
+			WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+		else
+			WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+	}
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_encoder *encoder;
+	int format;
+	int hsync_start;
+	int hsync_wid;
+	int vsync_wid;
+	uint32_t crtc_h_total_disp;
+	uint32_t crtc_h_sync_strt_wid;
+	uint32_t crtc_v_total_disp;
+	uint32_t crtc_v_sync_strt_wid;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
+				break;
+			}
+		}
+	}
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				   ? RADEON_CRTC_H_SYNC_POL
+				   : 0));
+
+	/* This works for double scan mode. */
+	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+			     | ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+				| ((vsync_wid & 0x1f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				   ? RADEON_CRTC_V_SYNC_POL
+				   : 0));
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t crtc2_gen_cntl;
+		uint32_t disp2_merge_cntl;
+
+		/* if the TV DAC is enabled for another crtc, keep it enabled */
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
+		crtc2_gen_cntl |= ((format << 8)
+				   | RADEON_CRTC2_VSYNC_DIS
+				   | RADEON_CRTC2_HSYNC_DIS
+				   | RADEON_CRTC2_DISP_DIS
+				   | RADEON_CRTC2_DISP_REQ_EN_B
+				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				      ? RADEON_CRTC2_DBL_SCAN_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				      ? RADEON_CRTC2_CSYNC_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				      ? RADEON_CRTC2_INTERLACE_EN
+				      : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
+		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
+	} else {
+		uint32_t crtc_gen_cntl;
+		uint32_t crtc_ext_cntl;
+		uint32_t disp_merge_cntl;
+
+		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
+				 | (format << 8)
+				 | RADEON_CRTC_DISP_REQ_EN_B
+				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				    ? RADEON_CRTC_DBL_SCAN_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				    ? RADEON_CRTC_CSYNC_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				    ? RADEON_CRTC_INTERLACE_EN
+				    : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc_gen_cntl |= RADEON_CRTC_EN;
+
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+				  RADEON_CRTC_VSYNC_DIS |
+				  RADEON_CRTC_HSYNC_DIS |
+				  RADEON_CRTC_DISPLAY_DIS);
+
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	}
+
+	if (is_tv)
+		radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
+						 &crtc_h_sync_strt_wid, &crtc_v_total_disp,
+						 &crtc_v_sync_strt_wid);
+
+	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+	return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_encoder *encoder;
+	uint32_t feedback_div = 0;
+	uint32_t frac_fb_div = 0;
+	uint32_t reference_div = 0;
+	uint32_t post_divider = 0;
+	uint32_t freq = 0;
+	uint8_t pll_gain;
+	bool use_bios_divs = false;
+	/* PLL registers */
+	uint32_t pll_ref_div = 0;
+	uint32_t pll_fb_post_div = 0;
+	uint32_t htotal_cntl = 0;
+	bool is_tv = false;
+	struct radeon_pll *pll;
+
+	struct {
+		int divider;
+		int bitvalue;
+	} *post_div, post_divs[]   = {
+		/* From RAGE 128 VR/RAGE 128 GL Register
+		 * Reference Manual (Technical Reference
+		 * Manual P/N RRG-G04100-C Rev. 0.04), page
+		 * 3-17 (PLL_DIV_[3:0]).
+		 */
+		{  1, 0 },              /* VCLK_SRC                 */
+		{  2, 1 },              /* VCLK_SRC/2               */
+		{  4, 2 },              /* VCLK_SRC/4               */
+		{  8, 3 },              /* VCLK_SRC/8               */
+		{  3, 4 },              /* VCLK_SRC/3               */
+		{ 16, 5 },              /* VCLK_SRC/16              */
+		{  6, 6 },              /* VCLK_SRC/6               */
+		{ 12, 7 },              /* VCLK_SRC/12              */
+		{  0, 0 }
+	};
+
+	if (radeon_crtc->crtc_id)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	pll->flags = RADEON_PLL_LEGACY;
+
+	if (mode->clock > 200000) /* range limits??? */
+		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+	else
+		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				break;
+			}
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+				if (!rdev->is_atom_bios) {
+					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+					if (lvds) {
+						if (lvds->use_bios_dividers) {
+							pll_ref_div = lvds->panel_ref_divider;
+							pll_fb_post_div   = (lvds->panel_fb_divider |
+									     (lvds->panel_post_divider << 16));
+							htotal_cntl  = 0;
+							use_bios_divs = true;
+						}
+					}
+				}
+				pll->flags |= RADEON_PLL_USE_REF_DIV;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!use_bios_divs) {
+		radeon_compute_pll_legacy(pll, mode->clock,
+					  &freq, &feedback_div, &frac_fb_div,
+					  &reference_div, &post_divider);
+
+		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+			if (post_div->divider == post_divider)
+				break;
+		}
+
+		if (!post_div->divider)
+			post_div = &post_divs[0];
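+		/*
+		 * For example, a computed post_divider of 4 matches the
+		 * { 4, 2 } table entry above, so bitvalue 2 is merged into
+		 * the feedback/post divider word below; a divider missing
+		 * from the table falls back to the { 1, 0 } (VCLK_SRC) entry.
+		 */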
+
+		DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
+			  (unsigned)freq,
+			  feedback_div,
+			  reference_div,
+			  post_divider);
+
+		pll_ref_div   = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+		/* apparently programming this otherwise causes a hang??? */
+		if (info->MacModel == RADEON_MAC_IBOOK)
+			pll_fb_post_div = 0x000600ad;
+		else
+#endif
+			pll_fb_post_div     = (feedback_div | (post_div->bitvalue << 16));
+
+		htotal_cntl    = mode->htotal & 0x7;
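+		/*
+		 * The CRTC timing registers count htotal in 8-pixel character
+		 * clocks, so HTOTAL_CNTL appears to carry the leftover low
+		 * three bits of htotal.
+		 */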
+
+	}
+
+	pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+					   pll_ref_div & 0x3ff,
+					   pll_fb_post_div & 0x7ff);
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+					  ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+					 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+		if (is_tv) {
+			radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
+						     &pll_ref_div, &pll_fb_post_div,
+						     &pixclks_cntl);
+		}
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     RADEON_P2PLL_RESET
+			     | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			       | RADEON_P2PLL_PVG_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			     pll_ref_div,
+			     ~RADEON_P2PLL_REF_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_FB0_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_POST0_DIV_MASK);
+
+		radeon_pll2_write_update(dev);
+		radeon_pll2_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     0,
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_SLEEP
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  (unsigned)pll_ref_div,
+			  (unsigned)pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_P2PLL_CNTL));
+		DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
+			  (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+			  (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+			  (unsigned)((pll_fb_post_div &
+				      RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	} else {
+		uint32_t pixclks_cntl;
+
+		if (is_tv) {
+			pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
+						     &pll_fb_post_div, &pixclks_cntl);
+		}
+
+		if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* A temporary workaround for occasional blanking on certain
+			   laptop panels. This appears to be related to the PLL divider
+			   registers (failure to lock?). It occurs even when all dividers
+			   are the same as their old settings. In that case we really
+			   don't need to fiddle with the PLL registers; skipping the
+			   update avoids the blanking problem on some panels.
+			*/
+			if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+			    (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+						 (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+				WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+					 RADEON_PLL_DIV_SEL,
+					 ~(RADEON_PLL_DIV_SEL));
+				r100_pll_errata_after_index(rdev);
+				return;
+			}
+		}
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     RADEON_PPLL_RESET
+			     | RADEON_PPLL_ATOMIC_UPDATE_EN
+			     | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_PVG_MASK));
+
+		WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+			 RADEON_PLL_DIV_SEL,
+			 ~(RADEON_PLL_DIV_SEL));
+		r100_pll_errata_after_index(rdev);
+
+		if (ASIC_IS_R300(rdev) ||
+		    (rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+				/* When restoring console mode, use saved PPLL_REF_DIV
+				 * setting.
+				 */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     pll_ref_div,
+					     0);
+			} else {
+				/* R300 uses ref_div_acc field as real ref divider */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+					     ~R300_PPLL_REF_DIV_ACC_MASK);
+			}
+		} else
+			WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+				     pll_ref_div,
+				     ~RADEON_PPLL_REF_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_FB3_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_POST3_DIV_MASK);
+
+		radeon_pll_write_update(dev);
+		radeon_pll_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     0,
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_SLEEP
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  pll_ref_div,
+			  pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_PPLL_CNTL));
+		DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
+			  pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+			  pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+			  (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_PPLLCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+
+		if (is_tv)
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	}
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	return true;
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	/* TODO TV */
+	radeon_crtc_set_base(crtc, x, y, old_fb);
+	radeon_set_crtc_timing(crtc, adjusted_mode);
+	radeon_set_pll(crtc, adjusted_mode);
+	radeon_overscan_setup(crtc, adjusted_mode);
+	if (radeon_crtc->crtc_id == 0) {
+		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
+	} else {
+		if (radeon_crtc->rmx_type != RMX_OFF) {
+			/* FIXME: only the first crtc has rmx; what should
+			 * we do?
+			 */
+			DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n");
+		}
+	}
+	return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	radeon_crtc->in_mode_set = true;
+	/*
+	 * The hardware wedges sometimes if you reconfigure one CRTC
+	 * whilst another is running (see fdo bug #24611).
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * Reenable the CRTCs that should be running.
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+		if (crtci->enabled)
+			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+	}
+	radeon_crtc->in_mode_set = false;
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+	.dpms = radeon_crtc_dpms,
+	.mode_fixup = radeon_crtc_mode_fixup,
+	.mode_set = radeon_crtc_mode_set,
+	.mode_set_base = radeon_crtc_set_base,
+	.mode_set_base_atomic = radeon_crtc_set_base_atomic,
+	.prepare = radeon_crtc_prepare,
+	.commit = radeon_crtc_commit,
+	.load_lut = radeon_crtc_load_lut,
+};
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	if (radeon_crtc->crtc_id == 1)
+		radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_legacy_crtc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_legacy_encoders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_legacy_encoders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_legacy_encoders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1817 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_legacy_encoders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder_helper_funcs *encoder_funcs;
+
+	encoder_funcs = encoder->helper_private;
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	radeon_encoder->active_device = 0;
+}
+
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+	int panel_pwr_delay = 2000;
+	bool is_mac = false;
+	uint8_t backlight_level;
+	DRM_DEBUG_KMS("\n");
+
+	lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		}
+	}
+
+	/* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
+	 * Taken from radeonfb.
+	 */
+	if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+		is_mac = true;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+		disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+		WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+		mdelay(1);
+
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+		lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+				   RADEON_LVDS_BL_MOD_LEVEL_MASK);
+		lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+				  RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
+		if (is_mac)
+			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+		lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+		if (is_mac) {
+			lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+		} else {
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+		}
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+		mdelay(panel_pwr_delay);
+		break;
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DRM_DEBUG("\n");
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		}
+	}
+
+	radeon_legacy_lvds_update(encoder, mode);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+	if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+		 * we need to call that on resume to set up the reg properly.
+		 */
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	} else {
+		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+		if (lvds) {
+			DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+			lvds_gen_cntl = lvds->lvds_gen_cntl;
+			lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					      (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+			lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+		} else
+			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	}
+	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+			   RADEON_LVDS_BLON |
+			   RADEON_LVDS_EN |
+			   RADEON_LVDS_RST_FM);
+
+	if (ASIC_IS_R300(rdev))
+		lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev)) {
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+		} else
+			lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+		else
+			lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+	WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+	WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+	if (rdev->family == CHIP_RV410)
+		WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+	.dpms = radeon_legacy_lvds_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_lvds_prepare,
+	.mode_set = radeon_legacy_lvds_mode_set,
+	.commit = radeon_legacy_lvds_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int dpms_mode = DRM_MODE_DPMS_ON;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		}
+	}
+
+	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	uint8_t level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	if (pdata->negative)
+		level = RADEON_MAX_BL_LEVEL - level;
+
+	return level;
+}
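+
+/*
+ * For example (assuming RADEON_MAX_BL_LEVEL is 255): a requested brightness
+ * of 200 maps to hardware level 200 on a normal panel, or 255 - 200 = 55 on
+ * a panel with an inverted (negative) backlight sense.
+ */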
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	radeon_legacy_set_backlight_level(radeon_encoder,
+					  radeon_legacy_lvds_level(bd));
+
+	return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+	.get_brightness = radeon_legacy_backlight_get_brightness,
+	.update_status	= radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+				  struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	uint8_t backlight_level;
+	char bl_name[16];
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+	if (!pmac_has_backlight_type("ati") &&
+	    !pmac_has_backlight_type("mnca"))
+		return;
+#endif
+
+	pdata = malloc(sizeof(struct radeon_backlight_privdata),
+	    DRM_MEM_DRIVER, M_NOWAIT);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+				       pdata, &radeon_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	/* First, try to detect backlight level sense based on the assumption
+	 * that firmware set it up at full brightness
+	 */
+	if (backlight_level == 0)
+		pdata->negative = true;
+	else if (backlight_level == 0xff)
+		pdata->negative = false;
+	else {
+		/* XXX hack... maybe some day we can figure out in what direction
+		 * backlight should work on a given panel?
+		 */
+		pdata->negative = (rdev->family != CHIP_RV200 &&
+				   rdev->family != CHIP_RV250 &&
+				   rdev->family != CHIP_RV280 &&
+				   rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+		pdata->negative = (pdata->negative ||
+				   of_machine_is_compatible("PowerBook4,3") ||
+				   of_machine_is_compatible("PowerBook6,3") ||
+				   of_machine_is_compatible("PowerBook6,5"));
+#endif
+	}
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	}
+
+	bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon legacy LVDS backlight initialized\n");
+
+	return;
+
+error:
+	free(pdata, DRM_MEM_DRIVER);
+	return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	}
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		free(pdata, DRM_MEM_DRIVER);
+
+		DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_legacy_backlight_init(struct radeon_encoder *encoder,
+				  struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->enc_priv) {
+		radeon_legacy_backlight_exit(radeon_encoder);
+		free(radeon_encoder->enc_priv, DRM_MEM_DRIVER);
+	}
+	drm_encoder_cleanup(encoder);
+	free(radeon_encoder, DRM_MEM_DRIVER);
+}
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+	.destroy = radeon_lvds_enc_destroy,
+};
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+	uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+		dac_cntl &= ~RADEON_DAC_PDWN;
+		dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+				    RADEON_DAC_PDWN_G |
+				    RADEON_DAC_PDWN_B);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+		dac_cntl |= RADEON_DAC_PDWN;
+		dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+				   RADEON_DAC_PDWN_G |
+				   RADEON_DAC_PDWN_B);
+		break;
+	}
+
+	/* handled in radeon_crtc_dpms() */
+	if (!(rdev->flags & RADEON_SINGLE_CRTC))
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+					       struct drm_display_mode *mode,
+					       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2)  & ~(RADEON_DAC2_DAC_CLK_SEL);
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	} else {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	}
+
+	dac_cntl = (RADEON_DAC_MASK_ALL |
+		    RADEON_DAC_VGA_ADR_EN |
+		    /* TODO 6-bits */
+		    RADEON_DAC_8BIT_EN);
+
+	WREG32_P(RADEON_DAC_CNTL,
+		       dac_cntl,
+		       RADEON_DAC_RANGE_CNTL |
+		       RADEON_DAC_BLANKING);
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+		dac_macro_cntl = p_dac->ps2_pdac_adj;
+	} else
+		dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+	dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+								  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+	uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+	enum drm_connector_status found = connector_status_disconnected;
+	bool color = true;
+
+	/* Just don't bother on RN50: those chips are often connected to remote
+	 * console hardware, and load detection often fails on them. So to make
+	 * everyone happy, report the encoder as always connected.
+	 */
+	if (ASIC_IS_RN50(rdev)) {
+		return connector_status_connected;
+	}
+
+	/* save the regs we need */
+	vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl = RREG32(RADEON_DAC_CNTL);
+	dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	tmp = vclk_ecp_cntl &
+		~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+	tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+	WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
+	tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+		RADEON_DAC_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else if (ASIC_IS_RV100(rdev))
+		tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+	tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+	WREG32(RADEON_DAC_CNTL, tmp);
+
+	tmp = dac_macro_cntl;
+	tmp &= ~(RADEON_DAC_PDWN_R |
+		 RADEON_DAC_PDWN_G |
+		 RADEON_DAC_PDWN_B);
+
+	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+	mdelay(2);
+
+	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+		found = connector_status_connected;
+
+	/* restore the regs we used */
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+	.dpms = radeon_legacy_primary_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_primary_dac_prepare,
+	.mode_set = radeon_legacy_primary_dac_mode_set,
+	.commit = radeon_legacy_primary_dac_commit,
+	.detect = radeon_legacy_primary_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+	tmp &= 0xfffff;
+	if (rdev->family == CHIP_RV280) {
+		/* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+		tmp ^= (1 << 22);
+		tmds_pll_cntl ^= (1 << 22);
+	}
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
+		for (i = 0; i < 4; i++) {
+			if (tmds->tmds_pll[i].freq == 0)
+				break;
+			if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+				tmp = tmds->tmds_pll[i].value;
+				break;
+			}
+		}
+	}
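+	/*
+	 * The loop above picks the first BIOS-provided PLL setting whose
+	 * frequency bound exceeds the pixel clock; mode->clock is in kHz,
+	 * so the divide by 10 converts to the 10 kHz units the table
+	 * appears to use, and a zero freq entry terminates the table.
+	 */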
+
+	if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+		if (tmp & 0xfff00000)
+			tmds_pll_cntl = tmp;
+		else {
+			tmds_pll_cntl &= 0xfff00000;
+			tmds_pll_cntl |= tmp;
+		}
+	} else
+		tmds_pll_cntl = tmp;
+
+	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+		~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+	if (rdev->family == CHIP_R200 ||
+	    rdev->family == CHIP_R100 ||
+	    ASIC_IS_R300(rdev))
+		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+	else /* RV chips got this bit reversed */
+		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+		       (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+			RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+	fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+			 RADEON_FP_DFP_SYNC_SEL |
+			 RADEON_FP_CRT_SYNC_SEL |
+			 RADEON_FP_CRTC_LOCK_8DOT |
+			 RADEON_FP_USE_SHADOW_EN |
+			 RADEON_FP_CRTC_USE_SHADOW_VEND |
+			 RADEON_FP_CRT_SYNC_ALT);
+
+	if (1) /* FIXME rgbBits == 8 */
+		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
+	else
+		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT; /* 18 bit format */
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+			else
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+		} else
+			fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+		} else
+			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+	.dpms = radeon_legacy_tmds_int_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_int_prepare,
+	.mode_set = radeon_legacy_tmds_int_mode_set,
+	.commit = radeon_legacy_tmds_int_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (rdev->is_atom_bios) {
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	} else {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+		if (1) /* FIXME rgbBits == 8 */
+			fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format */
+		else
+			fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT; /* 18 bit format */
+
+		fp2_gen_cntl &= ~(RADEON_FP2_ON |
+				  RADEON_FP2_DVO_EN |
+				  RADEON_FP2_DVO_RATE_SEL_SDR);
+
+		/* XXX: these are oem specific */
+		if (ASIC_IS_R300(rdev)) {
+			if ((dev->pci_device == 0x4850) &&
+			    (dev->pci_subvendor == 0x1028) &&
+			    (dev->pci_subdevice == 0x2001)) /* Dell Inspiron 8600 */
+				fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+			else
+				fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+			/*if (mode->clock > 165000)
+			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+		}
+		if (!radeon_combios_external_tmds_setup(encoder))
+			radeon_external_tmds_setup(encoder);
+	}
+
+	if (radeon_crtc->crtc_id == 0) {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+			else
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+		} else
+			fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+	} else {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+		} else
+			fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	/* don't destroy the i2c bus record here; that is done in radeon_i2c_fini */
+	free(radeon_encoder->enc_priv, DRM_MEM_DRIVER);
+	drm_encoder_cleanup(encoder);
+	free(radeon_encoder, DRM_MEM_DRIVER);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+	.dpms = radeon_legacy_tmds_ext_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_ext_prepare,
+	.mode_set = radeon_legacy_tmds_ext_mode_set,
+	.commit = radeon_legacy_tmds_ext_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+	.destroy = radeon_ext_tmds_enc_destroy,
+};
+
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+	uint32_t tv_master_cntl = 0;
+	bool is_tv;
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family == CHIP_R200)
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	else {
+		if (is_tv)
+			tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+		else
+			crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (rdev->family == CHIP_R200) {
+			fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		} else {
+			if (is_tv)
+				tv_master_cntl |= RADEON_TV_ON;
+			else
+				crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+						 R420_TV_DAC_GDACPD |
+						 R420_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+						 RADEON_TV_DAC_GDACPD |
+						 RADEON_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (rdev->family == CHIP_R200)
+			fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		else {
+			if (is_tv)
+				tv_master_cntl &= ~RADEON_TV_ON;
+			else
+				crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+						R420_TV_DAC_GDACPD |
+						R420_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+						RADEON_TV_DAC_GDACPD |
+						RADEON_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	}
+
+	if (rdev->family == CHIP_R200) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	} else {
+		if (is_tv)
+			WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+		/* on single-CRTC chips CRTC2_GEN_CNTL is handled in radeon_crtc_dpms() */
+		else if (!(rdev->flags & RADEON_SINGLE_CRTC))
+			WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+	uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family != CHIP_R200) {
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		if (rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423 ||
+		    rdev->family == CHIP_RV410) {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 R420_TV_DAC_DACADJ_MASK |
+					 R420_TV_DAC_RDACPD |
+					 R420_TV_DAC_GDACPD |
+					 R420_TV_DAC_BDACPD |
+					 R420_TV_DAC_TVENABLE);
+		} else {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 RADEON_TV_DAC_DACADJ_MASK |
+					 RADEON_TV_DAC_RDACPD |
+					 RADEON_TV_DAC_GDACPD |
+					 RADEON_TV_DAC_BDACPD);
+		}
+
+		tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+		if (is_tv) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M ||
+			    tv_dac->tv_std == TV_STD_PAL_60)
+				tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
+			else
+				tv_dac_cntl |= tv_dac->pal_tvdac_adj;
+
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J)
+				tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+			else
+				tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+		} else
+			tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
+					tv_dac->ps2_tvdac_adj);
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+		disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	} else if (rdev->family != CHIP_R200)
+		disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+	else if (rdev->family == CHIP_R200)
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+	if (rdev->family >= CHIP_R200)
+		disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
+	if (is_tv) {
+		uint32_t dac_cntl;
+
+		dac_cntl = RREG32(RADEON_DAC_CNTL);
+		dac_cntl &= ~RADEON_DAC_TVO_EN;
+		WREG32(RADEON_DAC_CNTL, dac_cntl);
+
+		if (ASIC_IS_R300(rdev))
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1;
+
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL;
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC |
+						     RADEON_DISP_TV_SOURCE_CRTC);
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+			}
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC;
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+			}
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	} else {
+
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+			} else
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+			} else
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	} else if (rdev->family != CHIP_R200)
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+	else if (rdev->family == CHIP_R200)
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->family >= CHIP_R200)
+		WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
+
+	if (is_tv)
+		radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
+				  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t disp_output_cntl, gpiopad_a, tmp;
+	bool found = false;
+
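+	/*
+	 * Load detect: drive a fixed level out of the TV DAC with monitor
+	 * detection enabled, then read the green/blue comparator bits to
+	 * tell an S-video load from a composite one.
+	 */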
+	/* save regs needed */
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+
+	WREG32_P(RADEON_GPIOPAD_A, 0, ~1);
+
+	WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);
+
+	WREG32(RADEON_CRTC2_GEN_CNTL,
+	       RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);
+
+	tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+	tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+	WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+
+	WREG32(RADEON_DAC_EXT_CNTL,
+	       RADEON_DAC2_FORCE_BLANK_OFF_EN |
+	       RADEON_DAC2_FORCE_DATA_EN |
+	       RADEON_DAC_FORCE_DATA_SEL_RGB |
+	       (0xec << RADEON_DAC_FORCE_DATA_SHIFT));
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(4);
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_NBLANK |
+	       RADEON_TV_DAC_NHOLD |
+	       RADEON_TV_MONITOR_DETECT_EN |
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(6);
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+	return found;
+}
+
+static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tv_dac_cntl, dac_cntl2;
+	uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
+	bool found = false;
+
+	if (ASIC_IS_R300(rdev))
+		return r300_legacy_tv_detect(encoder, connector);
+
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	config_cntl = RREG32(RADEON_CONFIG_CNTL);
+	tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);
+
+	tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	tmp = tv_master_cntl | RADEON_TV_ON;
+	tmp &= ~(RADEON_TV_ASYNC_RST |
+		 RADEON_RESTART_PHASE_FIX |
+		 RADEON_CRT_FIFO_CE_EN |
+		 RADEON_TV_FIFO_CE_EN |
+		 RADEON_RE_SYNC_NOW_SEL_MASK);
+	tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
+	WREG32(RADEON_TV_MASTER_CNTL, tmp);
+
+	tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
+		(8 << RADEON_TV_DAC_BGADJ_SHIFT);
+
+	if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
+		tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
+	else
+		tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
+		RADEON_RED_MX_FORCE_DAC_DATA |
+		RADEON_GRN_MX_FORCE_DAC_DATA |
+		RADEON_BLU_MX_FORCE_DAC_DATA |
+		(0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);
+
+	mdelay(3);
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if (tmp & RADEON_TV_DAC_GDACDET) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	return found;
+}
+
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+					 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+	uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+	uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+	uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+	uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+	bool found = false;
+	int i;
+
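+	/*
+	 * Load detect for the external DVO DAC: route a fixed pattern through
+	 * the graphics linear transform and CRTC2 to the DVO port, then poll
+	 * GPIO_MONID for the detect line to go high.
+	 */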
+	/* save the regs we need */
+	gpio_monid = RREG32(RADEON_GPIO_MONID);
+	fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+	disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+	disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+	disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+	disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+	disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+	crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+	crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+	crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+	crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+	tmp = RREG32(RADEON_GPIO_MONID);
+	tmp &= ~RADEON_GPIO_A_0;
+	WREG32(RADEON_GPIO_MONID, tmp);
+
+	WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+				     RADEON_FP2_PANEL_FORMAT |
+				     R200_FP2_SOURCE_SEL_TRANS_UNIT |
+				     RADEON_FP2_DVO_EN |
+				     R200_FP2_DVO_RATE_SEL_SDR));
+
+	WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+					 RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+	WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+				       RADEON_CRTC2_DISP_REQ_EN_B));
+
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
+	for (i = 0; i < 200; i++) {
+		tmp = RREG32(RADEON_GPIO_MONID);
+		if (tmp & RADEON_GPIO_Y_0)
+			found = true;
+
+		if (found)
+			break;
+
+		if (!drm_can_sleep())
+			mdelay(1);
+		else
+			DRM_MSLEEP(1);
+	}
+
+	/* restore the regs we used */
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+	return found;
+}
+
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+							     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+	uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
+	enum drm_connector_status found = connector_status_disconnected;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	bool color = true;
+	struct drm_crtc *crtc;
+
+	/* bail out if crtc2 is in use and some other encoder is driving it */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+		if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+			if (encoder->crtc != crtc) {
+				return connector_status_disconnected;
+			}
+		}
+	}
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
+		bool tv_detect;
+
+		if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
+			return connector_status_disconnected;
+
+		tv_detect = radeon_legacy_tv_detect(encoder, connector);
+		if (tv_detect && tv_dac)
+			found = connector_status_connected;
+		return found;
+	}
+
+	/* don't probe if the encoder is already in use for something other than CRT */
+	if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) {
+		DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device);
+		return connector_status_disconnected;
+	}
+
+	/* R200 uses an external DAC as its secondary DAC */
+	if (rdev->family == CHIP_R200) {
+		if (radeon_legacy_ext_dac_detect(encoder, connector))
+			found = connector_status_connected;
+		return found;
+	}
+
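+	/*
+	 * CRT load detect on the TV DAC: route the DAC to a live CRTC, force
+	 * a known RGB level with the comparator enabled, and check the
+	 * comparator output for an attached monitor.
+	 */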
+	/* save the regs we need */
+	pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	} else {
+		if (ASIC_IS_R300(rdev)) {
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+		} else {
+			disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		}
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+	tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
+			       | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+		WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+	} else {
+		tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+		tmp |= RADEON_CRTC2_CRT2_ON |
+			(2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+		WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+		if (ASIC_IS_R300(rdev)) {
+			WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+			tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+			tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+		} else {
+			tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+			WREG32(RADEON_DISP_HW_DEBUG, tmp);
+		}
+	}
+
+	tmp = RADEON_TV_DAC_NBLANK |
+		RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN |
+		RADEON_TV_DAC_STD_PS2;
+
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+		RADEON_DAC2_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	mdelay(10);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+			found = connector_status_connected;
+	} else {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+			found = connector_status_connected;
+	}
+
+	/* restore regs we used */
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	} else {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		if (ASIC_IS_R300(rdev)) {
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+			WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		} else {
+			WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		}
+	}
+
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+	.dpms = radeon_legacy_tv_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tv_dac_prepare,
+	.mode_set = radeon_legacy_tv_dac_mode_set,
+	.commit = radeon_legacy_tv_dac_commit,
+	.detect = radeon_legacy_tv_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+
+static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_int_tmds *tmds = NULL;
+	bool ret;
+
+	tmds = malloc(sizeof(struct radeon_encoder_int_tmds),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!tmds)
+		return NULL;
+
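+	/*
+	 * Prefer TMDS PLL data from the video BIOS; fall back to the
+	 * hardcoded defaults if neither ATOM nor COMBIOS provides any.
+	 */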
+	if (rdev->is_atom_bios)
+		ret = radeon_atombios_get_tmds_info(encoder, tmds);
+	else
+		ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
+
+	if (ret == false)
+		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_ext_tmds *tmds = NULL;
+	bool ret;
+
+	if (rdev->is_atom_bios)
+		return NULL;
+
+	tmds = malloc(sizeof(struct radeon_encoder_ext_tmds),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+
+	if (!tmds)
+		return NULL;
+
+	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (ret == false)
+		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	radeon_encoder = malloc(sizeof(struct radeon_encoder),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
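+	/* bitmask of CRTCs this encoder can drive; single-CRTC ASICs only have crtc 0 */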
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		encoder->possible_crtcs = 0x1;
+	else
+		encoder->possible_crtcs = 0x3;
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		encoder->possible_crtcs = 0x1;
+		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+		radeon_encoder->rmx_type = RMX_FULL;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+		radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+		drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+		if (!rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
+		break;
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_legacy_encoders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_legacy_tv.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_legacy_tv.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_legacy_tv.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,927 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_legacy_tv.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include "radeon.h"
+
+/*
+ * Integrated TV out support based on the GATOS code by
+ * Federico Ulivi <fulivi at lycos.com>
+ */
+
+
+/*
+ * Limits of h/v positions (hPos & vPos)
+ */
+#define MAX_H_POSITION 5 /* Range: [-5..5], negative is on the left, 0 is default, positive is on the right */
+#define MAX_V_POSITION 5 /* Range: [-5..5], negative is up, 0 is default, positive is down */
+
+/*
+ * Unit for hPos (in TV clock periods)
+ */
+#define H_POS_UNIT 10
+
+/*
+ * Indexes in h. code timing table for horizontal line position adjustment
+ */
+#define H_TABLE_POS1 6
+#define H_TABLE_POS2 8
+
+/*
+ * Limits of hor. size (hSize)
+ */
+#define MAX_H_SIZE 5 /* Range: [-5..5], negative is smaller, positive is larger */
+
+/* tv standard constants */
+#define NTSC_TV_CLOCK_T 233
+#define NTSC_TV_VFTOTAL 1
+#define NTSC_TV_LINES_PER_FRAME 525
+#define NTSC_TV_ZERO_H_SIZE 479166
+#define NTSC_TV_H_SIZE_UNIT 9478
+
+#define PAL_TV_CLOCK_T 188
+#define PAL_TV_VFTOTAL 3
+#define PAL_TV_LINES_PER_FRAME 625
+#define PAL_TV_ZERO_H_SIZE 473200
+#define PAL_TV_H_SIZE_UNIT 9360
+
+/* tv pll setting for 27 MHz ref clk */
+#define NTSC_TV_PLL_M_27 22
+#define NTSC_TV_PLL_N_27 175
+#define NTSC_TV_PLL_P_27 5
+
+#define PAL_TV_PLL_M_27 113
+#define PAL_TV_PLL_N_27 668
+#define PAL_TV_PLL_P_27 3
+
+/* tv pll setting for 14 MHz ref clk */
+#define NTSC_TV_PLL_M_14 33
+#define NTSC_TV_PLL_N_14 693
+#define NTSC_TV_PLL_P_14 7
+
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
+#define VERT_LEAD_IN_LINES 2
+#define FRAC_BITS 0xe
+#define FRAC_MASK 0x3fff
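+/* the vertical scaler uses 14-bit binary fractions (FRAC_BITS = 0xe = 14, FRAC_MASK = (1 << 14) - 1) */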
+
+struct radeon_tv_mode_constants {
+	uint16_t hor_resolution;
+	uint16_t ver_resolution;
+	enum radeon_tv_std standard;
+	uint16_t hor_total;
+	uint16_t ver_total;
+	uint16_t hor_start;
+	uint16_t hor_syncstart;
+	uint16_t ver_syncstart;
+	unsigned def_restart;
+	uint16_t crtcPLL_N;
+	uint8_t  crtcPLL_M;
+	uint8_t  crtcPLL_post_div;
+	unsigned pix_to_tv;
+};
+
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x003f,
+	0x0263,
+	0x0a24,
+	0x2a6b,
+	0x0a36,
+	0x126d, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1a8f, /* H_TABLE_POS2 */
+	0x1ec7,
+	0x3863,
+	0x1bfe,
+	0x1bfe,
+	0x1a2a,
+	0x1e95,
+	0x0e31,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200d,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1818,
+	0x21e3,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1817,
+	0x21d4,
+	0x0002,
+	0
+};
+
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x0058,
+	0x027c,
+	0x0a31,
+	0x2a77,
+	0x0a95,
+	0x124f, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1b22, /* H_TABLE_POS2 */
+	0x1ef9,
+	0x387c,
+	0x1bfe,
+	0x1bfe,
+	0x1b31,
+	0x1eb5,
+	0x0e43,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200c,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1821,
+	0x2240,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1822,
+	0x2230,
+	0x0002,
+	0
+};
+
+/**********************************************************************
+ *
+ * availableModes
+ *
+ * Table of all allowed modes for tv output
+ *
+ **********************************************************************/
+static const struct radeon_tv_mode_constants available_tv_modes[] = {
+	{   /* NTSC timing for 27 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		990,                /* horTotal */
+		740,                /* verTotal */
+		813,                /* horStart */
+		824,                /* horSyncStart */
+		632,                /* verSyncStart */
+		625592,             /* defRestart */
+		592,                /* crtcPLL_N */
+		91,                 /* crtcPLL_M */
+		4,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{   /* PAL timing for 27 MHz ref clk */
+		800,               /* horResolution */
+		600,               /* verResolution */
+		TV_STD_PAL,        /* standard */
+		1144,              /* horTotal */
+		706,               /* verTotal */
+		812,               /* horStart */
+		824,               /* horSyncStart */
+		669,               /* verSyncStart */
+		696700,            /* defRestart */
+		1382,              /* crtcPLL_N */
+		231,               /* crtcPLL_M */
+		4,                 /* crtcPLL_postDiv */
+		759,               /* pixToTV */
+	},
+	{   /* NTSC timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		1018,               /* horTotal */
+		727,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		630627,             /* defRestart */
+		347,                /* crtcPLL_N */
+		14,                 /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{ /* PAL timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_PAL,         /* standard */
+		1131,               /* horTotal */
+		742,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		708369,             /* defRestart */
+		211,                /* crtcPLL_N */
+		9,                  /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		759,                /* pixToTV */
+	},
+};
+
+#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
+
+static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(struct radeon_encoder *radeon_encoder,
+									    uint16_t *pll_ref_freq)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	if (pll_ref_freq)
+		*pll_ref_freq = pll->reference_freq;
+
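+	/* reference_freq is in 10 kHz units, so 2700 selects the 27 MHz tables */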
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[0];
+		else
+			const_ptr = &available_tv_modes[2];
+	} else {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[1];
+		else
+			const_ptr = &available_tv_modes[3];
+	}
+	return const_ptr;
+}
+
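+/*
+ * Flicker-removal filter tables: SLOPE_limit maps the computed
+ * flicker_removal line count to an index, which selects the slope and
+ * Y-coefficient values programmed into the sawtooth/rise/fall registers.
+ */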
+static long YCOEF_value[5] = { 2, 2, 0, 4, 0 };
+static long YCOEF_EN_value[5] = { 1, 1, 0, 1, 0 };
+static long SLOPE_value[5] = { 1, 2, 2, 4, 8 };
+static long SLOPE_limit[5] = { 6, 5, 4, 3, 2 };
+
+static void radeon_wait_pll_lock(struct drm_encoder *encoder, unsigned n_tests,
+				 unsigned n_wait_loops, unsigned cnt_threshold)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t save_pll_test;
+	unsigned int i, j;
+
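+	/*
+	 * PLL lock heuristic: expose the PLL test counter on the debug mux,
+	 * zero its high byte, and poll until the count reaches cnt_threshold
+	 * (or n_wait_loops reads pass), repeating the test n_tests times.
+	 */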
+	WREG32(RADEON_TEST_DEBUG_MUX, (RREG32(RADEON_TEST_DEBUG_MUX) & 0xffff60ff) | 0x100);
+	save_pll_test = RREG32_PLL(RADEON_PLL_TEST_CNTL);
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test & ~RADEON_PLL_MASK_READ_B);
+
+	WREG8(RADEON_CLOCK_CNTL_INDEX, RADEON_PLL_TEST_CNTL);
+	for (i = 0; i < n_tests; i++) {
+		WREG8(RADEON_CLOCK_CNTL_DATA + 3, 0);
+		for (j = 0; j < n_wait_loops; j++)
+			if (RREG8(RADEON_CLOCK_CNTL_DATA + 3) >= cnt_threshold)
+				break;
+	}
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test);
+	WREG32(RADEON_TEST_DEBUG_MUX, RREG32(RADEON_TEST_DEBUG_MUX) & 0xffffe0ff);
+}
+
+
+static void radeon_legacy_tv_write_fifo(struct radeon_encoder *radeon_encoder,
+					uint16_t addr, uint32_t value)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
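+	/*
+	 * TV encoder RAM is written through a host FIFO: latch the data and
+	 * address, trigger the write, then poll (bounded at 10000 reads) for
+	 * the write-acknowledge bit to clear before releasing the interface.
+	 */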
+	WREG32(RADEON_TV_HOST_WRITE_DATA, value);
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_WT);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_WT_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+}
+
+#if 0 /* included for completeness */
+static uint32_t radeon_legacy_tv_read_fifo(struct radeon_encoder *radeon_encoder, uint16_t addr)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_RD);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_RD_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+	return RREG32(RADEON_TV_HOST_READ_DATA);
+}
+#endif
+
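+/*
+ * The horizontal and vertical code timing tables live in TV encoder RAM
+ * reached through the host FIFO; TV_UV_ADR selects the table layout, and
+ * these helpers decode it into the starting FIFO address of each table.
+ */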
+static uint16_t radeon_get_htiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t h_table;
+
+	switch ((tv_uv_adr & RADEON_HCODE_TABLE_SEL_MASK) >> RADEON_HCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		h_table = RADEON_TV_MAX_FIFO_ADDR_INTERNAL;
+		break;
+	case 1:
+		h_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2;
+		break;
+	case 2:
+		h_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2;
+		break;
+	default:
+		h_table = 0;
+		break;
+	}
+	return h_table;
+}
+
+static uint16_t radeon_get_vtiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t v_table;
+
+	switch ((tv_uv_adr & RADEON_VCODE_TABLE_SEL_MASK) >> RADEON_VCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		v_table = ((tv_uv_adr & RADEON_MAX_UV_ADR_MASK) >> RADEON_MAX_UV_ADR_SHIFT) * 2 + 1;
+		break;
+	case 1:
+		v_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2 + 1;
+		break;
+	case 2:
+		v_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2 + 1;
+		break;
+	default:
+		v_table = 0;
+		break;
+	}
+	return v_table;
+}
+
+static void radeon_restore_tv_timing_tables(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint16_t h_table, v_table;
+	uint32_t tmp;
+	int i;
+
+	WREG32(RADEON_TV_UV_ADR, tv_dac->tv.tv_uv_adr);
+	h_table = radeon_get_htiming_tables_addr(tv_dac->tv.tv_uv_adr);
+	v_table = radeon_get_vtiming_tables_addr(tv_dac->tv.tv_uv_adr);
+
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i += 2, h_table--) {
+		tmp = ((uint32_t)tv_dac->tv.h_code_timing[i] << 14) | ((uint32_t)tv_dac->tv.h_code_timing[i+1]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, h_table, tmp);
+		if (tv_dac->tv.h_code_timing[i] == 0 || tv_dac->tv.h_code_timing[i + 1] == 0)
+			break;
+	}
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i += 2, v_table++) {
+		tmp = ((uint32_t)tv_dac->tv.v_code_timing[i+1] << 14) | ((uint32_t)tv_dac->tv.v_code_timing[i]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, v_table, tmp);
+		if (tv_dac->tv.v_code_timing[i] == 0 || tv_dac->tv.v_code_timing[i + 1] == 0)
+			break;
+	}
+}
+
+static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	WREG32(RADEON_TV_FRESTART, tv_dac->tv.frestart);
+	WREG32(RADEON_TV_HRESTART, tv_dac->tv.hrestart);
+	WREG32(RADEON_TV_VRESTART, tv_dac->tv.vrestart);
+}
+
+static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc;
+	int restart;
+	unsigned int h_total, v_total, f_total;
+	int v_offset, h_offset;
+	u16 p1, p2, h_inc;
+	bool h_changed;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return false;
+
+	h_total = const_ptr->hor_total;
+	v_total = const_ptr->ver_total;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		f_total = NTSC_TV_VFTOTAL + 1;
+	else
+		f_total = PAL_TV_VFTOTAL + 1;
+
+	/* adjust positions 1 & 2 in the horizontal code timing table */
+	h_offset = tv_dac->h_pos * H_POS_UNIT;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		h_offset -= 50;
+		p1 = hor_timing_NTSC[H_TABLE_POS1];
+		p2 = hor_timing_NTSC[H_TABLE_POS2];
+	} else {
+		p1 = hor_timing_PAL[H_TABLE_POS1];
+		p2 = hor_timing_PAL[H_TABLE_POS2];
+	}
+
+	p1 = (u16)((int)p1 + h_offset);
+	p2 = (u16)((int)p2 - h_offset);
+
+	h_changed = (p1 != tv_dac->tv.h_code_timing[H_TABLE_POS1] ||
+		     p2 != tv_dac->tv.h_code_timing[H_TABLE_POS2]);
+
+	tv_dac->tv.h_code_timing[H_TABLE_POS1] = p1;
+	tv_dac->tv.h_code_timing[H_TABLE_POS2] = p2;
+
+	/* Convert hOffset from n. of TV clock periods to n. of CRTC clock periods (CRTC pixels) */
+	h_offset = (h_offset * (int)(const_ptr->pix_to_tv)) / 1000;
+
+	/* adjust restart */
+	restart = const_ptr->def_restart;
+
+	/*
+	 * convert v_pos TV lines to n. of CRTC pixels
+	 */
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(NTSC_TV_LINES_PER_FRAME);
+	else
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(PAL_TV_LINES_PER_FRAME);
+
+	restart -= v_offset + h_offset;
+
+	DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+		  const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
+
+	tv_dac->tv.hrestart = restart % h_total;
+	restart /= h_total;
+	tv_dac->tv.vrestart = restart % v_total;
+	restart /= v_total;
+	tv_dac->tv.frestart = restart % f_total;
+
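+	/*
+	 * Split the absolute restart position into pixel, line and field
+	 * components; ignoring the position offsets, the NTSC/27 MHz default
+	 * of 625592 with h_total 990 and v_total 740 decomposes into
+	 * hrestart 902, vrestart 631, frestart 0.
+	 */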
+	DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
+		  (unsigned)tv_dac->tv.frestart,
+		  (unsigned)tv_dac->tv.vrestart,
+		  (unsigned)tv_dac->tv.hrestart);
+
+	/* compute h_inc from hsize */
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M)
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * NTSC_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(NTSC_TV_H_SIZE_UNIT) + (int)(NTSC_TV_ZERO_H_SIZE)));
+	else
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * PAL_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(PAL_TV_H_SIZE_UNIT) + (int)(PAL_TV_ZERO_H_SIZE)));
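+	/*
+	 * h_inc is the horizontal sampling increment in steps of 1/4096;
+	 * e.g. for NTSC with tv_dac->h_size == 0 the formula gives
+	 * (800 * 4096 * 233) / 479166 = 1593.
+	 */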
+
+	tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
+		((u32)h_inc << RADEON_H_INC_SHIFT);
+
+	DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+
+	return h_changed;
+}
+
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+	uint16_t pll_ref_freq;
+	uint32_t vert_space, flicker_removal, tmp;
+	uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
+	uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
+	uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
+	uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+	uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
+	uint32_t m, n, p;
+	const uint16_t *hor_timing;
+	const uint16_t *vert_timing;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, &pll_ref_freq);
+	if (!const_ptr)
+		return;
+
+	radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	tv_master_cntl = (RADEON_VIN_ASYNC_RST |
+			  RADEON_CRT_FIFO_CE_EN |
+			  RADEON_TV_FIFO_CE_EN |
+			  RADEON_TV_ON);
+
+	if (!ASIC_IS_R300(rdev))
+		tv_master_cntl |= RADEON_TVCLK_ALWAYS_ONb;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_master_cntl |= RADEON_RESTART_PHASE_FIX;
+
+	tv_modulator_cntl1 = (RADEON_SLEW_RATE_LIMIT |
+			      RADEON_SYNC_TIP_LEVEL |
+			      RADEON_YFLT_EN |
+			      RADEON_UVFLT_EN |
+			      (6 << RADEON_CY_FILT_BLEND_SHIFT));
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		tv_modulator_cntl1 |= (0x46 << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-111 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else if (tv_dac->tv_std == TV_STD_SCART_PAL) {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN;
+		tv_modulator_cntl2 = (0 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN |
+			(0x3b << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-78 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((62 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	}
+
+
+	tv_rgb_cntl = (RADEON_RGB_DITHER_EN
+		       | RADEON_TVOUT_SCALE_EN
+		       | (0x0b << RADEON_UVRAM_READ_MARGIN_SHIFT)
+		       | (0x07 << RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT)
+		       | RADEON_RGB_ATTEN_SEL(0x3)
+		       | RADEON_RGB_ATTEN_VAL(0xc));
+
+	if (radeon_crtc->crtc_id == 1)
+		tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC2;
+	else {
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_RMX;
+		else
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC1;
+	}
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		vert_space = const_ptr->ver_total * 2 * 10000 / NTSC_TV_LINES_PER_FRAME;
+	else
+		vert_space = const_ptr->ver_total * 2 * 10000 / PAL_TV_LINES_PER_FRAME;
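+	/*
+	 * vert_space is the ratio of CRTC lines (times two for the interlaced
+	 * fields) to TV lines per frame, scaled by 10000; the vertical scaler
+	 * takes it below as a 14-bit binary fraction.
+	 */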
+
+	tmp = RREG32(RADEON_TV_VSCALER_CNTL1);
+	tmp &= 0xe3ff0000;
+	tmp |= (vert_space * (1 << FRAC_BITS) / 10000);
+	tv_vscaler_cntl1 = tmp;
+
+	if (pll_ref_freq == 2700)
+		tv_vscaler_cntl1 |= RADEON_RESTART_FIELD;
+
+	if (const_ptr->hor_resolution == 1024)
+		tv_vscaler_cntl1 |= (4 << RADEON_Y_DEL_W_SIG_SHIFT);
+	else
+		tv_vscaler_cntl1 |= (2 << RADEON_Y_DEL_W_SIG_SHIFT);
+
+	/* scale up for int divide */
+	tmp = const_ptr->ver_total * 2 * 1000;
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tmp /= NTSC_TV_LINES_PER_FRAME;
+	} else {
+		tmp /= PAL_TV_LINES_PER_FRAME;
+	}
+	flicker_removal = (tmp + 500) / 1000;
+
+	if (flicker_removal < 3)
+		flicker_removal = 3;
+	for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
+		if (flicker_removal == SLOPE_limit[i])
+			break;
+	}
+
+	tv_y_saw_tooth_cntl = (vert_space * SLOPE_value[i] * (1 << (FRAC_BITS - 1)) +
+				5001) / 10000 / 8 | ((SLOPE_value[i] *
+				(1 << (FRAC_BITS - 1)) / 8) << 16);
+	tv_y_fall_cntl =
+		(YCOEF_EN_value[i] << 17) | ((YCOEF_value[i] * (1 << 8) / 8) << 24) |
+		RADEON_Y_FALL_PING_PONG | (272 * SLOPE_value[i] / 8) * (1 << (FRAC_BITS - 1)) /
+		1024;
+	tv_y_rise_cntl = RADEON_Y_RISE_PING_PONG|
+		(flicker_removal * 1024 - 272) * SLOPE_value[i] / 8 * (1 << (FRAC_BITS - 1)) / 1024;
+
+	tv_vscaler_cntl2 = RREG32(RADEON_TV_VSCALER_CNTL2) & 0x00fffff0;
+	tv_vscaler_cntl2 |= (0x10 << 24) |
+		RADEON_DITHER_MODE |
+		RADEON_Y_OUTPUT_DITHER_EN |
+		RADEON_UV_OUTPUT_DITHER_EN |
+		RADEON_UV_TO_BUF_DITHER_EN;
+
+	tmp = (tv_vscaler_cntl1 >> RADEON_UV_INC_SHIFT) & RADEON_UV_INC_MASK;
+	tmp = ((16384 * 256 * 10) / tmp + 5) / 10;
+	tmp = (tmp << RADEON_UV_OUTPUT_POST_SCALE_SHIFT) | 0x000b0000;
+	tv_dac->tv.timing_cntl = tmp;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		tv_dac_cntl = tv_dac->ntsc_tvdac_adj;
+	else
+		tv_dac_cntl = tv_dac->pal_tvdac_adj;
+
+	tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+	else
+		tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		if (pll_ref_freq == 2700) {
+			m = NTSC_TV_PLL_M_27;
+			n = NTSC_TV_PLL_N_27;
+			p = NTSC_TV_PLL_P_27;
+		} else {
+			m = NTSC_TV_PLL_M_14;
+			n = NTSC_TV_PLL_N_14;
+			p = NTSC_TV_PLL_P_14;
+		}
+	} else {
+		if (pll_ref_freq == 2700) {
+			m = PAL_TV_PLL_M_27;
+			n = PAL_TV_PLL_N_27;
+			p = PAL_TV_PLL_P_27;
+		} else {
+			m = PAL_TV_PLL_M_14;
+			n = PAL_TV_PLL_N_14;
+			p = PAL_TV_PLL_P_14;
+		}
+	}
+
+	tv_pll_cntl = (m & RADEON_TV_M0LO_MASK) |
+		(((m >> 8) & RADEON_TV_M0HI_MASK) << RADEON_TV_M0HI_SHIFT) |
+		((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) |
+		(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
+		((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
+
+	tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
+			((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
+			((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
+			RADEON_TVCLK_SRC_SEL_TVPLL |
+			RADEON_TVPLL_TEST_DIS);
+
+	tv_dac->tv.tv_uv_adr = 0xc8;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tv_ftotal = NTSC_TV_VFTOTAL;
+		hor_timing = hor_timing_NTSC;
+		vert_timing = vert_timing_NTSC;
+	} else {
+		hor_timing = hor_timing_PAL;
+		vert_timing = vert_timing_PAL;
+		tv_ftotal = PAL_TV_VFTOTAL;
+	}
+
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
+		if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
+			break;
+	}
+
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
+		if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
+			break;
+	}
+
+	radeon_legacy_tv_init_restarts(encoder);
+
+	/* play with DAC_CNTL */
+	/* play with GPIOPAD_A */
+	/* DISP_OUTPUT_CNTL */
+	/* use reference freq */
+
+	/* program the TV registers */
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST | RADEON_TV_FIFO_ASYNC_RST));
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	tmp &= ~RADEON_TV_DAC_NBLANK;
+	tmp |= RADEON_TV_DAC_BGSLEEP |
+		RADEON_TV_DAC_RDACPD |
+		RADEON_TV_DAC_GDACPD |
+		RADEON_TV_DAC_BDACPD;
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	/* TV PLL */
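+	/* program the dividers, pulse TVPLL_RESET, poll for lock, then switch the TV clock source over to the TV PLL */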
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+	WREG32_PLL(RADEON_TV_PLL_CNTL, tv_pll_cntl);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVPLL_RESET, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 300, 160, 27);
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~0xf);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVCLK_SRC_SEL_TVPLL, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, (1 << RADEON_TVPDC_SHIFT), ~RADEON_TVPDC_MASK);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_SLEEP);
+
+	/* TV HV */
+	WREG32(RADEON_TV_RGB_CNTL, tv_rgb_cntl);
+	WREG32(RADEON_TV_HTOTAL, const_ptr->hor_total - 1);
+	WREG32(RADEON_TV_HDISP, const_ptr->hor_resolution - 1);
+	WREG32(RADEON_TV_HSTART, const_ptr->hor_start);
+
+	WREG32(RADEON_TV_VTOTAL, const_ptr->ver_total - 1);
+	WREG32(RADEON_TV_VDISP, const_ptr->ver_resolution - 1);
+	WREG32(RADEON_TV_FTOTAL, tv_ftotal);
+	WREG32(RADEON_TV_VSCALER_CNTL1, tv_vscaler_cntl1);
+	WREG32(RADEON_TV_VSCALER_CNTL2, tv_vscaler_cntl2);
+
+	WREG32(RADEON_TV_Y_FALL_CNTL, tv_y_fall_cntl);
+	WREG32(RADEON_TV_Y_RISE_CNTL, tv_y_rise_cntl);
+	WREG32(RADEON_TV_Y_SAW_TOOTH_CNTL, tv_y_saw_tooth_cntl);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST));
+
+	/* TV restarts */
+	radeon_legacy_write_tv_restarts(radeon_encoder);
+
+	/* tv timings */
+	radeon_restore_tv_timing_tables(radeon_encoder);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST));
+
+	/* tv std */
+	WREG32(RADEON_TV_SYNC_CNTL, (RADEON_SYNC_PUB | RADEON_TV_SYNC_IO_DRIVE));
+	WREG32(RADEON_TV_TIMING_CNTL, tv_dac->tv.timing_cntl);
+	WREG32(RADEON_TV_MODULATOR_CNTL1, tv_modulator_cntl1);
+	WREG32(RADEON_TV_MODULATOR_CNTL2, tv_modulator_cntl2);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, (RADEON_Y_RED_EN |
+					    RADEON_C_GRN_EN |
+					    RADEON_CMP_BLU_EN |
+					    RADEON_DAC_DITHER_EN));
+
+	WREG32(RADEON_TV_CRC_CNTL, 0);
+
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+
+	WREG32(RADEON_TV_GAIN_LIMIT_SETTINGS, ((0x17f << RADEON_UV_GAIN_LIMIT_SHIFT) |
+					       (0x5ff << RADEON_Y_GAIN_LIMIT_SHIFT)));
+	WREG32(RADEON_TV_LINEAR_GAIN_SETTINGS, ((0x100 << RADEON_UV_GAIN_SHIFT) |
+						(0x100 << RADEON_Y_GAIN_SHIFT)));
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+}
+
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+	uint32_t tmp;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*h_total_disp = (((const_ptr->hor_resolution / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) |
+		(((const_ptr->hor_total / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT);
+
+	tmp = *h_sync_strt_wid;
+	tmp &= ~(RADEON_CRTC_H_SYNC_STRT_PIX | RADEON_CRTC_H_SYNC_STRT_CHAR);
+	tmp |= (((const_ptr->hor_syncstart / 8) - 1) << RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT) |
+		(const_ptr->hor_syncstart & 7);
+	*h_sync_strt_wid = tmp;
+
+	*v_total_disp = ((const_ptr->ver_resolution - 1) << RADEON_CRTC_V_DISP_SHIFT) |
+		((const_ptr->ver_total - 1) << RADEON_CRTC_V_TOTAL_SHIFT);
+
+	tmp = *v_sync_strt_wid;
+	tmp &= ~RADEON_CRTC_V_SYNC_STRT;
+	tmp |= ((const_ptr->ver_syncstart - 1) << RADEON_CRTC_V_SYNC_STRT_SHIFT);
+	*v_sync_strt_wid = tmp;
+}
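+
+/*
+ * A worked example of the packing above (illustrative values only): with
+ * hor_resolution = 800 and hor_total = 944, h_total_disp carries
+ * (800 / 8 - 1) = 99 in the CRTC_H_DISP field and (944 / 8 - 1) = 117 in
+ * the CRTC_H_TOTAL field; the low three bits of hor_syncstart land in
+ * RADEON_CRTC_H_SYNC_STRT_PIX.
+ */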
+
+static int get_post_div(int value)
+{
+	int post_div;
+	switch (value) {
+	case 1: post_div = 0; break;
+	case 2: post_div = 1; break;
+	case 3: post_div = 4; break;
+	case 4: post_div = 2; break;
+	case 6: post_div = 6; break;
+	case 8: post_div = 3; break;
+	case 12: post_div = 7; break;
+	case 16:
+	default: post_div = 5; break;
+	}
+	return post_div;
+}
+
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal_cntl = (const_ptr->hor_total & 0x7) | RADEON_HTOT_CNTL_VGA_EN;
+
+	*ppll_ref_div = const_ptr->crtcPLL_M;
+
+	*ppll_div_3 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~(RADEON_PIX2CLK_SRC_SEL_MASK | RADEON_PIXCLK_TV_SRC_SEL);
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK;
+}
+
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal2_cntl = (const_ptr->hor_total & 0x7);
+
+	*p2pll_ref_div = const_ptr->crtcPLL_M;
+
+	*p2pll_div_0 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~RADEON_PIX2CLK_SRC_SEL_MASK;
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK | RADEON_PIXCLK_TV_SRC_SEL;
+}
+


Property changes on: trunk/sys/dev/drm2/radeon/radeon_legacy_tv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_mem.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_mem.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_mem.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,305 @@
+/* $MidnightBSD$ */
+/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith at tungstengraphics.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_mem.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* Very simple allocator for GART memory, working on a static range
+ * already mapped into each client's address space.
+ */
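+
+/*
+ * A minimal usage sketch of this allocator (hypothetical values; heap and
+ * file_priv assumed to exist):
+ *
+ *	struct mem_block *blk;
+ *
+ *	blk = alloc_block(heap, 0x10000, 12, file_priv); // 64 KiB, 4 KiB aligned
+ *	if (blk != NULL)
+ *		free_block(blk);	// release, merging with free neighbours
+ */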
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+				     struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock = malloc(sizeof(*newblock),
+						     DRM_MEM_DRIVER, M_NOWAIT);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock = malloc(sizeof(*newblock),
+						     DRM_MEM_DRIVER, M_NOWAIT);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+				     int align2, struct drm_file *file_priv)
+{
+	struct mem_block *p;
+	int mask = (1 << align2) - 1;
+
+	list_for_each(p, heap) {
+		int start = (p->start + mask) & ~mask;
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
+			return split_block(p, start, size, file_priv);
+	}
+
+	return NULL;
+}
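+
+/*
+ * Note on the alignment math above: align2 is the log2 of the alignment,
+ * so align2 = 12 gives mask = 0xfff and start = (p->start + 0xfff) & ~0xfff,
+ * i.e. p->start rounded up to the next 4 KiB boundary (0x1234 -> 0x2000).
+ */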
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+	struct mem_block *p;
+
+	list_for_each(p, heap)
+	    if (p->start == start)
+		return p;
+
+	return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		free(q, DRM_MEM_DRIVER);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		free(p, DRM_MEM_DRIVER);
+	}
+}
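+
+/*
+ * Example of the merging above (hypothetical offsets): freeing
+ * [0x2000, 0x3000) while [0x3000, 0x5000) is already free collapses both
+ * into one [0x2000, 0x5000) block.  The list head can never be subsumed
+ * this way because its file_priv is the non-NULL sentinel set in
+ * init_heap() below.
+ */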
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+	struct mem_block *blocks = malloc(sizeof(*blocks),
+	    DRM_MEM_DRIVER, M_NOWAIT);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = malloc(sizeof(**heap), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!*heap) {
+		free(blocks, DRM_MEM_DRIVER);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	(*heap)->file_priv = (struct drm_file *) - 1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
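+
+/*
+ * Note: the (struct drm_file *)-1 sentinel above marks the list head as
+ * permanently "allocated", so the neighbour merging in free_block() and
+ * radeon_mem_release() can never swallow the head of the circular list.
+ */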
+
+/* Free all blocks associated with the releasing file.
+ */
+void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	list_for_each(p, heap) {
+		if (p->file_priv == file_priv)
+			p->file_priv = NULL;
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	list_for_each(p, heap) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			free(q, DRM_MEM_DRIVER);
+		}
+	}
+}
+
+/* Shutdown.
+ */
+void radeon_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		free(q, DRM_MEM_DRIVER);
+	}
+
+	free(*heap, DRM_MEM_DRIVER);
+	*heap = NULL;
+}
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
+{
+	switch (region) {
+	case RADEON_MEM_REGION_GART:
+		return &dev_priv->gart_heap;
+	case RADEON_MEM_REGION_FB:
+		return &dev_priv->fb_heap;
+	default:
+		return NULL;
+	}
+}
+
+int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_alloc_t *alloc = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, alloc->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	/* Make things easier on ourselves: all allocations at least
+	 * 4k aligned.
+	 */
+	if (alloc->alignment < 12)
+		alloc->alignment = 12;
+
+	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+	if (!block)
+		return -ENOMEM;
+
+	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+			     sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
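+
+/*
+ * A hypothetical userspace sketch of this handler's contract (field names
+ * as in radeon_drm.h; error handling elided):
+ *
+ *	int offset;
+ *	drm_radeon_mem_alloc_t req = {
+ *		.region = RADEON_MEM_REGION_GART,
+ *		.alignment = 12,		// log2, i.e. 4 KiB
+ *		.size = 0x10000,
+ *		.region_offset = &offset,	// filled with the block start
+ *	};
+ *	ioctl(fd, DRM_IOCTL_RADEON_ALLOC, &req);
+ */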
+
+int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_free_t *memfree = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, memfree->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	block = find_block(*heap, memfree->region_offset);
+	if (!block)
+		return -EFAULT;
+
+	if (block->file_priv != file_priv)
+		return -EPERM;
+
+	free_block(block);
+	return 0;
+}
+
+int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_init_heap_t *initheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, initheap->region);
+	if (!heap)
+		return -EFAULT;
+
+	if (*heap) {
+		DRM_ERROR("heap already initialized?");
+		return -EFAULT;
+	}
+
+	return init_heap(heap, initheap->start, initheap->size);
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_mem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_mode.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_mode.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_mode.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,739 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ *   Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_mode.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/drm_dp_helper.h>
+#include <dev/drm2/drm_fixed.h>
+#include <dev/drm2/drm_crtc_helper.h>
+
+struct radeon_bo;
+struct radeon_device;
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+
+enum radeon_rmx_type {
+	RMX_OFF,
+	RMX_FULL,
+	RMX_CENTER,
+	RMX_ASPECT
+};
+
+enum radeon_tv_std {
+	TV_STD_NTSC,
+	TV_STD_PAL,
+	TV_STD_PAL_M,
+	TV_STD_PAL_60,
+	TV_STD_NTSC_J,
+	TV_STD_SCART_PAL,
+	TV_STD_SECAM,
+	TV_STD_PAL_CN,
+	TV_STD_PAL_N,
+};
+
+enum radeon_underscan_type {
+	UNDERSCAN_OFF,
+	UNDERSCAN_ON,
+	UNDERSCAN_AUTO,
+};
+
+enum radeon_hpd_id {
+	RADEON_HPD_1 = 0,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+	RADEON_HPD_NONE = 0xff,
+};
+
+#define RADEON_MAX_I2C_BUS 16
+
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
+struct radeon_i2c_bus_rec {
+	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* hpd id used by atom */
+	enum radeon_hpd_id hpd;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
+	uint32_t mask_clk_reg;
+	uint32_t mask_data_reg;
+	uint32_t a_clk_reg;
+	uint32_t a_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
+	uint32_t mask_clk_mask;
+	uint32_t mask_data_mask;
+	uint32_t a_clk_mask;
+	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
+};
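+
+/*
+ * A minimal bit-banging sketch against the mask/a/en/y scheme documented
+ * above (rec assumed valid; register access via the driver's RREG32/WREG32):
+ * to drive the data line low, grab the pin, make it an output, then clear
+ * its value:
+ *
+ *	WREG32(rec->mask_data_reg,
+ *	       RREG32(rec->mask_data_reg) | rec->mask_data_mask);
+ *	WREG32(rec->en_data_reg,
+ *	       RREG32(rec->en_data_reg) | rec->en_data_mask);
+ *	WREG32(rec->a_data_reg,
+ *	       RREG32(rec->a_data_reg) & ~rec->a_data_mask);
+ */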
+
+struct radeon_tmds_pll {
+    uint32_t freq;
+    uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+/* pll flags */
+#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
+#define RADEON_PLL_USE_REF_DIV          (1 << 2)
+#define RADEON_PLL_LEGACY               (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
+#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV         (1 << 12)
+#define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
+
+struct radeon_pll {
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
+	uint32_t pll_in_min;
+	uint32_t pll_in_max;
+	uint32_t pll_out_min;
+	uint32_t pll_out_max;
+	uint32_t lcd_pll_out_min;
+	uint32_t lcd_pll_out_max;
+	uint32_t best_vco;
+
+	/* divider limits */
+	uint32_t min_ref_div;
+	uint32_t max_ref_div;
+	uint32_t min_post_div;
+	uint32_t max_post_div;
+	uint32_t min_feedback_div;
+	uint32_t max_feedback_div;
+	uint32_t min_frac_feedback_div;
+	uint32_t max_frac_feedback_div;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
+};
+
+struct radeon_i2c_chan {
+	device_t adapter;
+	device_t iic_bus;
+	struct drm_device *dev;
+	struct radeon_i2c_bus_rec rec;
+	char   name[48];
+};
+
+/* mostly for macs, but really any system without connector tables */
+enum radeon_connector_table {
+	CT_NONE = 0,
+	CT_GENERIC,
+	CT_IBOOK,
+	CT_POWERBOOK_EXTERNAL,
+	CT_POWERBOOK_INTERNAL,
+	CT_POWERBOOK_VGA,
+	CT_MINI_EXTERNAL,
+	CT_MINI_INTERNAL,
+	CT_IMAC_G5_ISIGHT,
+	CT_EMAC,
+	CT_RN50_POWER,
+	CT_MAC_X800,
+	CT_MAC_G5_9600,
+	CT_SAM440EP,
+	CT_MAC_G4_SILVER
+};
+
+enum radeon_dvo_chip {
+	DVO_SIL164,
+	DVO_SIL1178,
+};
+
+struct radeon_fbdev;
+
+struct radeon_afmt {
+	bool enabled;
+	int offset;
+	bool last_buffer_filled_status;
+	int id;
+};
+
+struct radeon_mode_info {
+	struct atom_context *atom_context;
+	struct card_info *atom_card_info;
+	enum radeon_connector_table connector_table;
+	bool mode_config_initialized;
+	struct radeon_crtc *crtcs[6];
+	struct radeon_afmt *afmt[6];
+	/* DVI-I properties */
+	struct drm_property *coherent_mode_property;
+	/* DAC enable load detect */
+	struct drm_property *load_detect_property;
+	/* TV standard */
+	struct drm_property *tv_std_property;
+	/* legacy TMDS PLL detect */
+	struct drm_property *tmds_pll_property;
+	/* underscan */
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* hardcoded DFP edid from BIOS */
+	struct edid *bios_hardcoded_edid;
+	int bios_hardcoded_edid_size;
+
+	/* pointer to fbdev info structure */
+	struct radeon_fbdev *rfbdev;
+	/* firmware flags */
+	u16 firmware_flags;
+	/* pointer to backlight encoder */
+	struct radeon_encoder *bl_encoder;
+};
+
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+	struct radeon_encoder *encoder;
+	uint8_t negative;
+};
+
+#endif
+
+#define MAX_H_CODE_TIMING_LEN 32
+#define MAX_V_CODE_TIMING_LEN 32
+
+/* need to store these because reading
+   the code tables back is expensive */
+struct radeon_tv_regs {
+	uint32_t tv_uv_adr;
+	uint32_t timing_cntl;
+	uint32_t hrestart;
+	uint32_t vrestart;
+	uint32_t frestart;
+	uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
+	uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
+};
+
+struct radeon_atom_ss {
+	uint16_t percentage;
+	uint8_t type;
+	uint16_t step;
+	uint8_t delay;
+	uint8_t range;
+	uint8_t refdiv;
+	/* asic_ss */
+	uint16_t rate;
+	uint16_t amount;
+};
+
+struct radeon_crtc {
+	struct drm_crtc base;
+	int crtc_id;
+	u16 lut_r[256], lut_g[256], lut_b[256];
+	bool enabled;
+	bool can_tile;
+	bool in_mode_set;
+	uint32_t crtc_offset;
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width;
+	int cursor_height;
+	uint32_t legacy_display_base_addr;
+	uint32_t legacy_cursor_offset;
+	enum radeon_rmx_type rmx_type;
+	u8 h_border;
+	u8 v_border;
+	fixed20_12 vsc;
+	fixed20_12 hsc;
+	struct drm_display_mode native_mode;
+	int pll_id;
+	/* page flipping */
+	struct radeon_unpin_work *unpin_work;
+	int deferred_flip_completion;
+	/* pll sharing */
+	struct radeon_atom_ss ss;
+	bool ss_enabled;
+	u32 adjusted_clock;
+	int bpc;
+	u32 pll_reference_div;
+	u32 pll_post_div;
+	u32 pll_flags;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+};
+
+struct radeon_encoder_primary_dac {
+	/* legacy primary dac */
+	uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+	/* legacy lvds */
+	uint16_t panel_vcc_delay;
+	uint8_t  panel_pwr_delay;
+	uint8_t  panel_digon_delay;
+	uint8_t  panel_blon_delay;
+	uint16_t panel_ref_divider;
+	uint8_t  panel_post_divider;
+	uint16_t panel_fb_divider;
+	bool     use_bios_dividers;
+	uint32_t lvds_gen_cntl;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int      dpms_mode;
+	uint8_t  backlight_level;
+};
+
+struct radeon_encoder_tv_dac {
+	/* legacy tv dac */
+	uint32_t ps2_tvdac_adj;
+	uint32_t ntsc_tvdac_adj;
+	uint32_t pal_tvdac_adj;
+
+	int               h_pos;
+	int               v_pos;
+	int               h_size;
+	int               supported_tv_stds;
+	bool              tv_on;
+	enum radeon_tv_std tv_std;
+	struct radeon_tv_regs tv;
+};
+
+struct radeon_encoder_int_tmds {
+	/* legacy int tmds */
+	struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_ext_tmds {
+	/* tmds over dvo */
+	struct radeon_i2c_chan *i2c_bus;
+	uint8_t slave_addr;
+	enum radeon_dvo_chip dvo_chip;
+};
+
+/* spread spectrum */
+struct radeon_encoder_atom_dig {
+	bool linkb;
+	/* atom dig */
+	bool coherent_mode;
+	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+	/* atom lvds/edp */
+	uint32_t lcd_misc;
+	uint16_t panel_pwr_delay;
+	uint32_t lcd_ss_id;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int dpms_mode;
+	uint8_t backlight_level;
+	int panel_mode;
+	struct radeon_afmt *afmt;
+};
+
+struct radeon_encoder_atom_dac {
+	enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder {
+	struct drm_encoder base;
+	uint32_t encoder_enum;
+	uint32_t encoder_id;
+	uint32_t devices;
+	uint32_t active_device;
+	uint32_t flags;
+	uint32_t pixel_clock;
+	enum radeon_rmx_type rmx_type;
+	enum radeon_underscan_type underscan_type;
+	uint32_t underscan_hborder;
+	uint32_t underscan_vborder;
+	struct drm_display_mode native_mode;
+	void *enc_priv;
+	int audio_polling_active;
+	bool is_ext_encoder;
+	u16 caps;
+};
+
+struct radeon_connector_atom_dig {
+	uint32_t igp_lane_info;
+	/* displayport */
+	struct radeon_i2c_chan *dp_i2c_bus;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+	bool edp_on;
+};
+
+struct radeon_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+};
+
+struct radeon_hpd {
+	enum radeon_hpd_id hpd;
+	u8 plugged_state;
+	struct radeon_gpio_rec gpio;
+};
+
+struct radeon_router {
+	u32 router_id;
+	struct radeon_i2c_bus_rec i2c_info;
+	u8 i2c_addr;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
+};
+
+struct radeon_connector {
+	struct drm_connector base;
+	uint32_t connector_id;
+	uint32_t devices;
+	struct radeon_i2c_chan *ddc_bus;
+	/* some systems have an hdmi and vga port with a shared ddc line */
+	bool shared_ddc;
+	bool use_digital;
+	/* we need to keep the EDID between detect and get_modes,
+	   since the analog/digital/tvencoder decision depends on it */
+	struct edid *edid;
+	void *con_priv;
+	bool dac_load_detect;
+	bool detected_by_load; /* if the connection status was determined by load */
+	uint16_t connector_object_id;
+	struct radeon_hpd hpd;
+	struct radeon_router router;
+	struct radeon_i2c_chan *router_bus;
+};
+
+struct radeon_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+				((em) == ATOM_ENCODER_MODE_DP_MST))
+
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock);
+
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
+extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				       struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+				      const struct drm_display_mode *mode);
+extern void radeon_dp_link_train(struct drm_encoder *encoder,
+				 struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+				    struct drm_connector *connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+					   int action, uint8_t lane_num,
+					   uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
+extern int radeon_dp_i2c_aux_ch(device_t dev, int mode,
+				u8 write_byte, u8 *read_byte);
+
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+			   struct radeon_i2c_bus_rec *rec,
+			   const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+						 struct radeon_i2c_bus_rec *i2c_bus);
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+						    struct radeon_i2c_bus_rec *rec,
+						    const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+						 struct radeon_i2c_bus_rec *rec,
+						 const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+				u8 slave_addr,
+				u8 addr,
+				u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+				u8 slave_addr,
+				u8 addr,
+				u8 val);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id, u32 clock);
+
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+				      uint64_t freq,
+				      uint32_t *dot_clock_p,
+				      uint32_t *fb_div_p,
+				      uint32_t *frac_fb_div_p,
+				      uint32_t *ref_div_p,
+				      uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     u32 freq,
+				     u32 *dot_clock_p,
+				     u32 *fb_div_p,
+				     u32 *frac_fb_div_p,
+				     u32 *ref_div_p,
+				     u32 *post_div_p);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
+extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y,
+					 enum mode_set_atomic state);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				 struct drm_framebuffer *old_fb);
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				       struct drm_framebuffer *fb,
+				       int x, int y,
+				       enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int x, int y, int atomic);
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+				  struct drm_file *file_priv,
+				  uint32_t handle,
+				  uint32_t width,
+				  uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+				   int x, int y);
+
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
+
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+					  struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+						     struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+						   struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+							 struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						       struct radeon_encoder_ext_tmds *tmds);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev);
+extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				     u16 blue, int regno);
+extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				     u16 *blue, int regno);
+int radeon_framebuffer_init(struct drm_device *dev,
+			     struct radeon_framebuffer *rfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			     struct radeon_crtc *radeon_crtc);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode);
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
+
+/* legacy tv */
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid);
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_mode.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_object.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_object.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_object.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,658 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse at freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_object.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+#ifdef FREEBSD_WIP
+#include "radeon_trace.h"
+#endif /* FREEBSD_WIP */
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+#endif
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+
+/*
+ * To exclude mutual BO access we rely on bo_reserve exclusion, as all
+ * functions that touch a BO call it first.
+ */
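+
+/*
+ * A minimal sketch of that pattern (error handling elided):
+ *
+ *	r = radeon_bo_reserve(bo, false);
+ *	if (r == 0) {
+ *		...access or modify the BO...
+ *		radeon_bo_unreserve(bo);
+ *	}
+ */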
+
+static void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+
+	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+		/* remove from all vm address space */
+		radeon_vm_bo_rmv(bo->rdev, bo_va);
+	}
+}
+
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct radeon_bo *bo;
+
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	sx_xlock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	sx_xunlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	radeon_bo_clear_va(bo);
+	drm_gem_object_release(&bo->gem_base);
+	free(bo, DRM_MEM_DRIVER);
+}
+
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &radeon_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		}
+	}
+	if (domain & RADEON_GEM_DOMAIN_CPU) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+		}
+	}
+	if (!c)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
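+
+/*
+ * Sketch of the mapping above (non-AGP card assumed): asking for
+ * RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT yields the ordered
+ * placements { WC|UNCACHED|VRAM, CACHED|TT }, so TTM tries VRAM first and
+ * falls back to cached GTT.
+ */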
+
+int radeon_bo_create(struct radeon_device *rdev,
+		     unsigned long size, int byte_align, bool kernel, u32 domain,
+		     struct sg_table *sg, struct radeon_bo **bo_ptr)
+{
+	struct radeon_bo *bo;
+	enum ttm_bo_type type;
+	unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	size_t acc_size;
+	int r;
+
+	size = roundup2(size, PAGE_SIZE);
+
+	if (kernel) {
+		type = ttm_bo_type_kernel;
+	} else if (sg) {
+		type = ttm_bo_type_sg;
+	} else {
+		type = ttm_bo_type_device;
+	}
+	*bo_ptr = NULL;
+
+	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+				       sizeof(struct radeon_bo));
+
+	bo = malloc(sizeof(struct radeon_bo),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (bo == NULL)
+		return -ENOMEM;
+	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		free(bo, DRM_MEM_DRIVER);
+		return r;
+	}
+	bo->rdev = rdev;
+	bo->gem_base.driver_private = NULL;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->va);
+	radeon_ttm_placement_from_domain(bo, domain);
+	/* Kernel allocations are uninterruptible */
+	sx_slock(&rdev->pm.mclk_lock);
+	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, page_align, !kernel, NULL,
+			acc_size, sg, &radeon_ttm_bo_destroy);
+	sx_sunlock(&rdev->pm.mclk_lock);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	*bo_ptr = bo;
+
+#ifdef FREEBSD_WIP
+	trace_radeon_bo_create(bo);
+#endif /* FREEBSD_WIP */
+
+	return 0;
+}
+
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr) {
+			*ptr = bo->kptr;
+		}
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r) {
+		return r;
+	}
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr) {
+		*ptr = bo->kptr;
+	}
+	radeon_bo_check_tiling(bo, 0, 0);
+	return 0;
+}
+
+void radeon_bo_kunmap(struct radeon_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+void radeon_bo_unref(struct radeon_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+	struct radeon_device *rdev;
+
+	if ((*bo) == NULL)
+		return;
+	rdev = (*bo)->rdev;
+	tbo = &((*bo)->tbo);
+	sx_slock(&rdev->pm.mclk_lock);
+	ttm_bo_unref(&tbo);
+	sx_sunlock(&rdev->pm.mclk_lock);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+			     u64 *gpu_addr)
+{
+	int r, i;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+
+		if (max_offset != 0) {
+			u64 domain_start;
+
+			if (domain == RADEON_GEM_DOMAIN_VRAM)
+				domain_start = bo->rdev->mc.vram_start;
+			else
+				domain_start = bo->rdev->mc.gtt_start;
+			if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
+				DRM_ERROR("radeon_bo_pin_restricted: "
+				    "max_offset(%ju) < "
+				    "(radeon_bo_gpu_offset(%ju) - "
+				    "domain_start(%ju)",
+				    (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo),
+				    (uintmax_t)domain_start);
+			}
+		}
+
+		return 0;
+	}
+	radeon_ttm_placement_from_domain(bo, domain);
+	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* force to pin into visible video ram */
+		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	}
+	if (max_offset) {
+		u64 lpfn = max_offset >> PAGE_SHIFT;
+
+		if (!bo->placement.lpfn)
+			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+
+		if (lpfn < bo->placement.lpfn)
+			bo->placement.lpfn = lpfn;
+	}
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+	}
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	return r;
+}
+
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
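+
+/*
+ * A minimal pinning sketch (bo assumed created and unpinned; error handling
+ * elided).  As elsewhere in this file, the BO is reserved around the pin:
+ *
+ *	u64 gpu_addr;
+ *
+ *	r = radeon_bo_reserve(bo, false);
+ *	if (r == 0) {
+ *		r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ *		radeon_bo_unreserve(bo);
+ *	}
+ */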
+
+int radeon_bo_unpin(struct radeon_bo *bo)
+{
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+	return r;
+}
+
+int radeon_bo_evict_vram(struct radeon_device *rdev)
+{
+	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
+	if (0 && (rdev->flags & RADEON_IS_IGP)) {
+		if (rdev->mc.igp_sideport_enabled == false)
+			/* Useless to evict on IGP chips */
+			return 0;
+	}
+	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_bo_force_delete(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects)) {
+		return;
+	}
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		DRM_LOCK(rdev->ddev);
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		sx_xlock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		sx_xunlock(&bo->rdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		DRM_UNLOCK(rdev->ddev);
+	}
+}
+
+int radeon_bo_init(struct radeon_device *rdev)
+{
+	/* Add an MTRR for the VRAM */
+	rdev->mc.vram_mtrr = drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+			DRM_MTRR_WC);
+	DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n",
+		(uintmax_t)rdev->mc.mc_vram_size >> 20,
+		(uintmax_t)rdev->mc.aper_size >> 20);
+	DRM_INFO("RAM width %dbits %cDR\n",
+			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+	return radeon_ttm_init(rdev);
+}
+
+void radeon_bo_fini(struct radeon_device *rdev)
+{
+	radeon_ttm_fini(rdev);
+}
+
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head)
+{
+	if (lobj->wdomain) {
+		list_add(&lobj->tv.head, head);
+	} else {
+		list_add_tail(&lobj->tv.head, head);
+	}
+}
+
+int radeon_bo_list_validate(struct list_head *head)
+{
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
+	u32 domain;
+	int r;
+
+	r = ttm_eu_reserve_buffers(head);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	list_for_each_entry(lobj, head, tv.head) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
+			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+		retry:
+			radeon_ttm_placement_from_domain(bo, domain);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement,
+						true, false);
+			if (unlikely(r)) {
+				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+					domain |= RADEON_GEM_DOMAIN_GTT;
+					goto retry;
+				}
+				return r;
+			}
+		}
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
+	}
+	return 0;
+}
+
+#ifdef FREEBSD_WIP
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+			     struct vm_area_struct *vma)
+{
+	return ttm_fbdev_mmap(vma, &bo->tbo);
+}
+#endif /* FREEBSD_WIP */
+
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+	struct radeon_bo *old_object;
+	int steal;
+	int i;
+
+	KASSERT(radeon_bo_is_reserved(bo),
+	    ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));
+
+	if (!bo->tiling_flags)
+		return 0;
+
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
+		goto out;
+	}
+
+	steal = -1;
+	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+		reg = &rdev->surface_regs[i];
+		if (!reg->bo)
+			break;
+
+		old_object = reg->bo;
+		if (old_object->pin_count == 0)
+			steal = i;
+	}
+
+	/* if we are all out */
+	if (i == RADEON_GEM_MAX_SURFACES) {
+		if (steal == -1)
+			return -ENOMEM;
+		/* find someone with a surface reg and nuke their BO */
+		reg = &rdev->surface_regs[steal];
+		old_object = reg->bo;
+		/* blow away the mapping */
+		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+		ttm_bo_unmap_virtual(&old_object->tbo);
+		old_object->surface_reg = -1;
+		i = steal;
+	}
+
+	bo->surface_reg = i;
+	reg->bo = bo;
+
+out:
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
+	return 0;
+}
+
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+
+	if (bo->surface_reg == -1)
+		return;
+
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
+
+	reg->bo = NULL;
+	bo->surface_reg = -1;
+}
+
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
+{
+	struct radeon_device *rdev = bo->rdev;
+	int r;
+
+	if (rdev->family >= CHIP_CEDAR) {
+		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
+
+		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+		switch (bankw) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (bankh) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (mtaspect) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (tilesplit > 6) {
+			return -EINVAL;
+		}
+		if (stilesplit > 6) {
+			return -EINVAL;
+		}
+	}
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
+}
+
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
+{
+	KASSERT(radeon_bo_is_reserved(bo),
+	    ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
+	if (tiling_flags)
+		*tiling_flags = bo->tiling_flags;
+	if (pitch)
+		*pitch = bo->pitch;
+}
+
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
+{
+	KASSERT((radeon_bo_is_reserved(bo) || force_drop),
+	    ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+		return 0;
+
+	if (force_drop) {
+		radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+		if (!has_moved)
+			return 0;
+
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if ((bo->surface_reg >= 0) && !has_moved)
+		return 0;
+
+	return radeon_bo_get_surface_reg(bo);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct radeon_bo *rbo;
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return;
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
+	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+}
+
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct radeon_device *rdev;
+	struct radeon_bo *rbo;
+	unsigned long offset, size;
+	int r;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return 0;
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		size = bo->mem.num_pages << PAGE_SHIFT;
+		offset = bo->mem.start << PAGE_SHIFT;
+		if ((offset + size) > rdev->mc.visible_vram_size) {
+			/* hurrah, the memory is not visible! */
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			r = ttm_bo_validate(bo, &rbo->placement, false, false);
+			if (unlikely(r != 0))
+				return r;
+			offset = bo->mem.start << PAGE_SHIFT;
+			/* this should not happen */
+			if ((offset + size) > rdev->mc.visible_vram_size)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0))
+		return r;
+	mtx_lock(&bo->tbo.bdev->fence_lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	mtx_unlock(&bo->tbo.bdev->fence_lock);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_intr:	don't return -ERESTARTSYS on pending signal
+ *
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
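
Callers pair radeon_bo_reserve() with radeon_bo_unreserve() around any mutation, as radeon_bo_set_tiling_flags() does above; a hedged sketch with a hypothetical helper name:

	/* sketch: reserve, mutate, unreserve; -ERESTARTSYS propagates out */
	int example_set_pitch(struct radeon_bo *bo, uint32_t new_pitch)
	{
		int r = radeon_bo_reserve(bo, false);
		if (r != 0)
			return r;	/* interrupted or failed: caller retries */
		bo->pitch = new_pitch;	/* only safe while reserved */
		radeon_bo_unreserve(bo);
		return 0;
	}
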


Property changes on: trunk/sys/dev/drm2/radeon/radeon_object.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_object.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_object.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_object.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,194 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_object.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon.h"
+
+/*
+ * Undefine max_offset (defined in vm/vm_map.h), because it conflicts
+ * with an argument of the function radeon_bo_pin_restricted().
+ */
+#undef max_offset
+
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return RADEON_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return RADEON_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return RADEON_GEM_DOMAIN_CPU;
+	default:
+		break;
+	}
+	return 0;
+}
+
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.offset;
+}
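
The note above suggests a debug check; a hedged sketch of one, assuming the pin_count field this driver keeps in struct radeon_bo:

	static inline u64 radeon_bo_gpu_offset_checked(struct radeon_bo *bo)
	{
		/* assumption: bo->pin_count is the driver's pin counter */
		KASSERT(bo->pin_count > 0 || radeon_bo_is_reserved(bo),
		    ("radeon_bo_gpu_offset: bo neither pinned nor reserved"));
		return bo->tbo.offset;
	}
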
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+	return ttm_bo_is_reserved(&bo->tbo);
+}
+
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init and thus isn't
+ * protected by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+			  bool no_wait);
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain,
+			    struct sg_table *sg,
+			    struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+				    u64 max_offset, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head);
+#ifdef FREEBSD_WIP
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+				struct vm_area_struct *vma);
+#endif /* FREEBSD_WIP */
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+					struct ttm_mem_reg *mem);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+
+/*
+ * sub allocation
+ */
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
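
Both helpers add the sub-buffer's soffset to a per-manager base; a self-contained illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t gpu_base = 0x100000;	/* manager->gpu_addr, hypothetical */
		char     pool[4096];		/* stands in for manager->cpu_ptr */
		unsigned soffset  = 256;	/* offset granted by the manager */

		printf("gpu addr: 0x%llx\n",
		    (unsigned long long)(gpu_base + soffset));
		printf("cpu addr: %p\n", (void *)(pool + soffset));
		return 0;
	}
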
+
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+				     struct radeon_sa_manager *sa_manager,
+				     unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+					struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+			    struct radeon_sa_manager *sa_manager,
+			    struct radeon_sa_bo **sa_bo,
+			    unsigned size, unsigned align, bool block);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+			      struct radeon_sa_bo **sa_bo,
+			      struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+					 struct seq_file *m);
+#endif
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_object.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_pm.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_pm.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_pm.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,921 @@
+/* $MidnightBSD$ */
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5 at gmail.com>
+ *          Alex Deucher <alexdeucher at gmail.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_pm.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "avivod.h"
+#include "atom.h"
+
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static const char *radeon_pm_state_type_name[5] = {
+	"",
+	"Powersave",
+	"Battery",
+	"Balanced",
+	"Performance",
+};
+
+#ifdef FREEBSD_WIP
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+#endif /* FREEBSD_WIP */
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance)
+{
+	int i;
+	int found_instance = -1;
+
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		if (rdev->pm.power_state[i].type == ps_type) {
+			found_instance++;
+			if (found_instance == instance)
+				return i;
+		}
+	}
+	/* return default if no match */
+	return rdev->pm.default_power_state_index;
+}
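
A hedged usage sketch: instance is zero-based, so 1 requests the second state of a type, and a miss falls back to the default index. The POWER_STATE_TYPE_PERFORMANCE enumerator is assumed from the state-name table above:

	static struct radeon_power_state *
	example_second_performance_state(struct radeon_device *rdev)
	{
		int idx = radeon_pm_get_type_index(rdev,
		    POWER_STATE_TYPE_PERFORMANCE, 1);
		return &rdev->pm.power_state[idx];
	}
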
+
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (rdev->pm.profile == PM_PROFILE_AUTO) {
+			sx_xlock(&rdev->pm.mutex);
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+			sx_xunlock(&rdev->pm.mutex);
+		}
+	}
+}
+
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+	switch (rdev->pm.profile) {
+	case PM_PROFILE_DEFAULT:
+		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+		break;
+	case PM_PROFILE_AUTO:
+#ifdef FREEBSD_WIP
+		if (power_supply_is_system_supplied() > 0) {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		} else {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		}
+#endif /* FREEBSD_WIP */
+		break;
+	case PM_PROFILE_LOW:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+		break;
+	case PM_PROFILE_MID:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		break;
+	case PM_PROFILE_HIGH:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		break;
+	}
+
+	if (rdev->pm.active_crtc_count == 0) {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+	} else {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+	}
+}
+
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects))
+		return;
+
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			ttm_bo_unmap_virtual(&bo->tbo);
+	}
+}
+
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+	if (rdev->pm.active_crtcs) {
+		rdev->pm.vblank_sync = false;
+#ifdef FREEBSD_WIP
+		wait_event_timeout(
+			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+#endif /* FREEBSD_WIP */
+	}
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+	u32 sclk, mclk;
+	bool misc_after = false;
+
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	if (radeon_gui_idle(rdev)) {
+		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			clock_info[rdev->pm.requested_clock_mode_index].sclk;
+		if (sclk > rdev->pm.default_sclk)
+			sclk = rdev->pm.default_sclk;
+
+		/* Starting with BTC, a single state is used for both
+		 * MH and SH.  The difference is that the high clock index is
+		 * always used for mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+		else
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
+		if (mclk > rdev->pm.default_mclk)
+			mclk = rdev->pm.default_mclk;
+
+		/* upvolt before raising clocks, downvolt after lowering clocks */
+		if (sclk < rdev->pm.current_sclk)
+			misc_after = true;
+
+		radeon_sync_with_vblank(rdev);
+
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			if (!radeon_pm_in_vbl(rdev))
+				return;
+		}
+
+		radeon_pm_prepare(rdev);
+
+		if (!misc_after)
+			/* voltage, pcie lanes, etc. */
+			radeon_pm_misc(rdev);
+
+		/* set engine clock */
+		if (sclk != rdev->pm.current_sclk) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_engine_clock(rdev, sclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_sclk = sclk;
+			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
+		}
+
+		/* set memory clock */
+		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_memory_clock(rdev, mclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_mclk = mclk;
+			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
+		}
+
+		if (misc_after)
+			/* voltage, pcie lanes, etc. */
+			radeon_pm_misc(rdev);
+
+		radeon_pm_finish(rdev);
+
+		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+	} else
+		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
+}
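
The misc_after flag encodes the rule in the comment: voltage must never sit below what the target clock needs, so it rises before an upclock and drops after a downclock. A self-contained sketch of just that decision:

	#include <stdbool.h>
	#include <stdio.h>

	static void reclock(unsigned cur_sclk, unsigned new_sclk)
	{
		bool misc_after = new_sclk < cur_sclk;	/* downclocking? */

		if (!misc_after)
			puts("program voltage");	/* upvolt first */
		puts("program engine clock");
		if (misc_after)
			puts("program voltage");	/* downvolt last */
	}

	int main(void) { reclock(400, 600); reclock(600, 400); return 0; }
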
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+	int i, r;
+
+	/* no need to take locks, etc. if nothing's going to change */
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	DRM_LOCK(rdev->ddev);
+	sx_xlock(&rdev->pm.mclk_lock);
+	sx_xlock(&rdev->ring_lock);
+
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (!ring->ready) {
+			continue;
+		}
+		r = radeon_fence_wait_empty_locked(rdev, i);
+		if (r) {
+			/* needs a GPU reset; don't reset here */
+			sx_xunlock(&rdev->ring_lock);
+			sx_xunlock(&rdev->pm.mclk_lock);
+			DRM_UNLOCK(rdev->ddev);
+			return;
+		}
+	}
+
+	radeon_unmap_vram_bos(rdev);
+
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.active_crtcs & (1 << i)) {
+				rdev->pm.req_vblank |= (1 << i);
+				drm_vblank_get(rdev->ddev, i);
+			}
+		}
+	}
+
+	radeon_set_power_state(rdev);
+
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.req_vblank & (1 << i)) {
+				rdev->pm.req_vblank &= ~(1 << i);
+				drm_vblank_put(rdev->ddev, i);
+			}
+		}
+	}
+
+	/* update display watermarks based on new power state */
+	radeon_update_bandwidth_info(rdev);
+	if (rdev->pm.active_crtc_count)
+		radeon_bandwidth_update(rdev);
+
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+	sx_xunlock(&rdev->ring_lock);
+	sx_xunlock(&rdev->pm.mclk_lock);
+	DRM_UNLOCK(rdev->ddev);
+}
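
The locks above are taken outer-to-inner and released inner-to-outer, on the error path as well; a hedged sketch of the shape, against the driver's own lock calls (hypothetical function name):

	static void example_reclock_locked(struct radeon_device *rdev)
	{
		DRM_LOCK(rdev->ddev);		/* 1: whole-device lock */
		sx_xlock(&rdev->pm.mclk_lock);	/* 2: memory-clock lock */
		sx_xlock(&rdev->ring_lock);	/* 3: ring submission   */
		/* GPU quiescent here: safe to touch clocks */
		sx_xunlock(&rdev->ring_lock);
		sx_xunlock(&rdev->pm.mclk_lock);
		DRM_UNLOCK(rdev->ddev);
	}
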
+
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+	int i, j;
+	struct radeon_power_state *power_state;
+	struct radeon_pm_clock_info *clock_info;
+
+	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		power_state = &rdev->pm.power_state[i];
+		DRM_DEBUG_DRIVER("State %d: %s\n", i,
+			radeon_pm_state_type_name[power_state->type]);
+		if (i == rdev->pm.default_power_state_index)
+			DRM_DEBUG_DRIVER("\tDefault");
+		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+			DRM_DEBUG_DRIVER("\tSingle display only\n");
+		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+		for (j = 0; j < power_state->num_clock_modes; j++) {
+			clock_info = &(power_state->clock_info[j]);
+			if (rdev->flags & RADEON_IS_IGP)
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+						 j,
+						 clock_info->sclk * 10);
+			else
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+						 j,
+						 clock_info->sclk * 10,
+						 clock_info->mclk * 10,
+						 clock_info->voltage.voltage);
+		}
+	}
+}
+
+#ifdef FREEBSD_WIP
+static ssize_t radeon_get_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int cp = rdev->pm.profile;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(cp == PM_PROFILE_AUTO) ? "auto" :
+			(cp == PM_PROFILE_LOW) ? "low" :
+			(cp == PM_PROFILE_MID) ? "mid" :
+			(cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	sx_xlock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (strncmp("default", buf, strlen("default")) == 0)
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+		else if (strncmp("auto", buf, strlen("auto")) == 0)
+			rdev->pm.profile = PM_PROFILE_AUTO;
+		else if (strncmp("low", buf, strlen("low")) == 0)
+			rdev->pm.profile = PM_PROFILE_LOW;
+		else if (strncmp("mid", buf, strlen("mid")) == 0)
+			rdev->pm.profile = PM_PROFILE_MID;
+		else if (strncmp("high", buf, strlen("high")) == 0)
+			rdev->pm.profile = PM_PROFILE_HIGH;
+		else {
+			count = -EINVAL;
+			goto fail;
+		}
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else
+		count = -EINVAL;
+
+fail:
+	sx_xunlock(&rdev->pm.mutex);
+
+	return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int pm = rdev->pm.pm_method;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+		sx_xlock(&rdev->pm.mutex);
+		rdev->pm.pm_method = PM_METHOD_DYNPM;
+		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+		sx_xunlock(&rdev->pm.mutex);
+	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		sx_xlock(&rdev->pm.mutex);
+		/* disable dynpm */
+		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		sx_xunlock(&rdev->pm.mutex);
+#ifdef FREEBSD_WIP
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+#endif /* FREEBSD_WIP */
+	} else {
+		count = -EINVAL;
+		goto fail;
+	}
+	radeon_pm_compute_clocks(rdev);
+fail:
+	return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int temp;
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV6XX:
+		temp = rv6xx_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_RV770:
+		temp = rv770_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
+		temp = evergreen_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_SUMO:
+		temp = sumo_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_SI:
+		temp = si_get_temp(rdev);
+		break;
+	default:
+		temp = 0;
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "radeon\n");
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_name.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+	.attrs = hwmon_attributes,
+};
+#endif /* FREEBSD_WIP */
+
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+	int err = 0;
+
+#ifdef FREEBSD_WIP
+	rdev->pm.int_hwmon_dev = NULL;
+#endif /* FREEBSD_WIP */
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV6XX:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_SUMO:
+	case THERMAL_TYPE_SI:
+		/* No support for TN yet */
+		if (rdev->family == CHIP_ARUBA)
+			return err;
+#ifdef FREEBSD_WIP
+		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+			err = PTR_ERR(rdev->pm.int_hwmon_dev);
+			dev_err(rdev->dev,
+				"Unable to register hwmon device: %d\n", err);
+			break;
+		}
+		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+					 &hwmon_attrgroup);
+		if (err) {
+			dev_err(rdev->dev,
+				"Unable to create hwmon sysfs file: %d\n", err);
+			hwmon_device_unregister(rdev->dev);
+		}
+#endif /* FREEBSD_WIP */
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+#ifdef FREEBSD_WIP
+	if (rdev->pm.int_hwmon_dev) {
+		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
+		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+	}
+#endif /* FREEBSD_WIP */
+}
+
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+	sx_xlock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+	}
+	sx_xunlock(&rdev->pm.mutex);
+
+#ifdef FREEBSD_WIP
+	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+#endif /* FREEBSD_WIP */
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+	/* set up the default clocks if the MC ucode is loaded */
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+	/* asic init will reset the default power state */
+	sx_xlock(&rdev->pm.mutex);
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	rdev->pm.current_sclk = rdev->pm.default_sclk;
+	rdev->pm.current_mclk = rdev->pm.default_mclk;
+	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM
+	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+#ifdef FREEBSD_WIP
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+#endif /* FREEBSD_WIP */
+	}
+	sx_xunlock(&rdev->pm.mutex);
+	radeon_pm_compute_clocks(rdev);
+}
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* default to profile method */
+	rdev->pm.pm_method = PM_METHOD_PROFILE;
+	rdev->pm.profile = PM_PROFILE_DEFAULT;
+	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios)
+			radeon_atombios_get_power_modes(rdev);
+		else
+			radeon_combios_get_power_modes(rdev);
+		radeon_pm_print_states(rdev);
+		radeon_pm_init_profile(rdev);
+		/* set up the default clocks if the MC ucode is loaded */
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+							SET_VOLTAGE_TYPE_ASIC_VDDC);
+			if (rdev->pm.default_vddci)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+							SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
+	}
+
+	/* set up the internal thermal sensor if applicable */
+	ret = radeon_hwmon_init(rdev);
+	if (ret)
+		return ret;
+
+#ifdef FREEBSD_WIP
+	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+#endif /* FREEBSD_WIP */
+
+	if (rdev->pm.num_power_states > 1) {
+		/* where's the best place to put these? */
+#ifdef FREEBSD_WIP
+		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+		if (ret)
+			DRM_ERROR("failed to create device file for power profile\n");
+		ret = device_create_file(rdev->dev, &dev_attr_power_method);
+		if (ret)
+			DRM_ERROR("failed to create device file for power method\n");
+#endif /* FREEBSD_WIP */
+
+		if (radeon_debugfs_pm_init(rdev)) {
+			DRM_ERROR("Failed to register debugfs file for PM!\n");
+		}
+
+		DRM_INFO("radeon: power management initialized\n");
+	}
+
+	return 0;
+}
+
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states > 1) {
+		sx_xlock(&rdev->pm.mutex);
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			/* reset default clocks */
+			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+			radeon_pm_set_clocks(rdev);
+		}
+		sx_xunlock(&rdev->pm.mutex);
+
+#ifdef FREEBSD_WIP
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+#endif /* FREEBSD_WIP */
+	}
+
+	if (rdev->pm.power_state) {
+		int i;
+		for (i = 0; i < rdev->pm.num_power_states; ++i) {
+			free(rdev->pm.power_state[i].clock_info, DRM_MEM_DRIVER);
+		}
+		free(rdev->pm.power_state, DRM_MEM_DRIVER);
+		rdev->pm.power_state = NULL;
+		rdev->pm.num_power_states = 0;
+	}
+
+	radeon_hwmon_fini(rdev);
+}
+
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+
+	if (rdev->pm.num_power_states < 2)
+		return;
+
+	sx_xlock(&rdev->pm.mutex);
+
+	rdev->pm.active_crtcs = 0;
+	rdev->pm.active_crtc_count = 0;
+	list_for_each_entry(crtc,
+		&ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+			rdev->pm.active_crtc_count++;
+		}
+	}
+
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+			if (rdev->pm.active_crtc_count > 1) {
+				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+#ifdef FREEBSD_WIP
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+#endif /* FREEBSD_WIP */
+
+					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
+				}
+			} else if (rdev->pm.active_crtc_count == 1) {
+				/* TODO: Increase clocks if needed for current mode */
+
+				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+#ifdef FREEBSD_WIP
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+#endif /* FREEBSD_WIP */
+				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+#ifdef FREEBSD_WIP
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+#endif /* FREEBSD_WIP */
+					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
+				}
+			} else { /* count == 0 */
+				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+#ifdef FREEBSD_WIP
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+#endif /* FREEBSD_WIP */
+
+					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+				}
+			}
+		}
+	}
+
+	sx_xunlock(&rdev->pm.mutex);
+}
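
The dynpm branches above reduce to three cases on the active CRTC count; a self-contained sketch of the decision:

	#include <stdio.h>

	static const char *dynpm_action(int active_crtc_count)
	{
		if (active_crtc_count > 1)
			return "pause dynpm, return to default clocks";
		if (active_crtc_count == 1)
			return "activate dynpm, plan an upclock";
		return "nothing displayed: force minimum clocks";
	}

	int main(void)
	{
		for (int n = 0; n <= 2; n++)
			printf("%d crtc(s): %s\n", n, dynpm_action(n));
		return 0;
	}
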
+
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
+{
+	int  crtc, vpos, hpos, vbl_status;
+	bool in_vbl = true;
+
+	/* Iterate over all active CRTCs. All CRTCs must be in vblank,
+	 * otherwise return in_vbl == false.
+	 */
+	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+		if (rdev->pm.active_crtcs & (1 << crtc)) {
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
+				in_vbl = false;
+		}
+	}
+
+	return in_vbl;
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+	u32 stat_crtc = 0;
+	bool in_vbl = radeon_pm_in_vbl(rdev);
+
+	if (in_vbl == false)
+		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
+			 finish ? "exit" : "entry");
+	return in_vbl;
+}
+
+#ifdef FREEBSD_WIP
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev;
+	int resched;
+	rdev = container_of(work, struct radeon_device,
+				pm.dynpm_idle_work.work);
+
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	sx_xlock(&rdev->pm.mutex);
+	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+		int not_processed = 0;
+		int i;
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			struct radeon_ring *ring = &rdev->ring[i];
+
+			if (ring->ready) {
+				not_processed += radeon_fence_count_emitted(rdev, i);
+				if (not_processed >= 3)
+					break;
+			}
+		}
+
+		if (not_processed >= 3) { /* should upclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_upclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_UPCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		} else if (not_processed == 0) { /* should downclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_downclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_DOWNCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		}
+
+		/* Note, radeon_pm_set_clocks is called with static_switch set
+		 * to false since we want to wait for vbl to avoid flicker.
+		 */
+		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+		    jiffies > rdev->pm.dynpm_action_timeout) {
+			radeon_pm_get_dynpm_state(rdev);
+			radeon_pm_set_clocks(rdev);
+		}
+
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
+	sx_xunlock(&rdev->pm.mutex);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+}
+#endif /* FREEBSD_WIP */
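
The heuristic above treats three or more unprocessed fences as busy and zero as idle; a self-contained model of just the planning step:

	#include <stdio.h>

	static const char *dynpm_plan(int not_processed)
	{
		if (not_processed >= 3)
			return "UPCLOCK";
		if (not_processed == 0)
			return "DOWNCLOCK";
		return "NONE";
	}

	int main(void)
	{
		for (int n = 0; n < 4; n++)
			printf("%d unprocessed fences -> %s\n", n, dynpm_plan(n));
		return 0;
	}
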
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+	/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+	if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+		seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+	else
+		seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+	if (rdev->asic->pm.get_memory_clock)
+		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+	if (rdev->pm.current_vddc)
+		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+	if (rdev->asic->pm.get_pcie_lanes)
+		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+
+	return 0;
+}
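
The "%u0" formats above are not typos: radeon keeps clock fields in units of 10 kHz, so printing the raw value with a trailing literal zero yields kHz. A self-contained demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned sclk = 40000;	/* 40000 * 10 kHz = 400 MHz */
		printf("current engine clock: %u0 kHz\n", sclk);
		return 0;
	}
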
+
+static struct drm_info_list radeon_pm_info_list[] = {
+	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+	return 0;
+#endif
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_pm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_prime.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_prime.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_prime.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,231 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_prime.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <drm/drmP.h>
+
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+					       enum dma_data_direction dir)
+{
+	struct radeon_bo *bo = attachment->dmabuf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int npages = bo->tbo.num_pages;
+	struct sg_table *sg;
+	int nents;
+
+	mutex_lock(&dev->struct_mutex);
+	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	mutex_unlock(&dev->struct_mutex);
+	return sg;
+}
+
+static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				     struct sg_table *sg, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+
+	if (bo->gem_base.export_dma_buf == dma_buf) {
+		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
+		bo->gem_base.export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
+	}
+}
+
+static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (bo->vmapping_count) {
+		bo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	bo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+
+	mutex_lock(&dev->struct_mutex);
+	bo->vmapping_count--;
+	if (bo->vmapping_count == 0) {
+		ttm_bo_kunmap(&bo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
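
The vmap path above is a classic refcounted mapping: the first user builds it, later users bump a count, the last user tears it down; a self-contained model:

	#include <stdio.h>

	static int vmapping_count;

	static void vmap_get(void)
	{
		if (vmapping_count++ == 0)
			puts("ttm_bo_kmap: build the mapping");	/* first user */
	}

	static void vmap_put(void)
	{
		if (--vmapping_count == 0)
			puts("ttm_bo_kunmap: tear it down");	/* last user */
	}

	int main(void)
	{
		vmap_get(); vmap_get(); vmap_put(); vmap_put();
		return 0;
	}
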
+
+static const struct dma_buf_ops radeon_dmabuf_ops = {
+	.map_dma_buf = radeon_gem_map_dma_buf,
+	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
+	.release = radeon_gem_dmabuf_release,
+	.kmap = radeon_gem_kmap,
+	.kmap_atomic = radeon_gem_kmap_atomic,
+	.kunmap = radeon_gem_kunmap,
+	.kunmap_atomic = radeon_gem_kunmap_atomic,
+	.mmap = radeon_gem_prime_mmap,
+	.vmap = radeon_gem_prime_vmap,
+	.vunmap = radeon_gem_prime_vunmap,
+};
+
+static int radeon_prime_create(struct drm_device *dev,
+			       size_t size,
+			       struct sg_table *sg,
+			       struct radeon_bo **pbo)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_bo *bo;
+	int ret;
+
+	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
+	if (ret)
+		return ret;
+	bo = *pbo;
+	bo->gem_base.driver_private = bo;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&bo->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
+	return 0;
+}
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *obj,
+					int flags)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret = 0;
+
+	ret = radeon_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ERR_PTR(ret);
+
+	/* pin buffer into GTT */
+	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+	if (ret) {
+		radeon_bo_unreserve(bo);
+		return ERR_PTR(ret);
+	}
+	radeon_bo_unreserve(bo);
+	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct radeon_bo *bo;
+	int ret;
+
+	if (dma_buf->ops == &radeon_dmabuf_ops) {
+		bo = dma_buf->priv;
+		if (bo->gem_base.dev == dev) {
+			drm_gem_object_reference(&bo->gem_base);
+			dma_buf_put(dma_buf);
+			return &bo->gem_base;
+		}
+	}
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
+	if (ret)
+		goto fail_unmap;
+
+	bo->gem_base.import_attach = attach;
+
+	return &bo->gem_base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
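
The import above has two paths: unwrap our own export, or attach and wrap a foreign buffer; a hedged sketch of the dispatch:

	#include <stdbool.h>
	#include <stdio.h>

	static const char *prime_import_path(bool ours, bool same_dev)
	{
		if (ours && same_dev)	/* re-import of our own export */
			return "reference the existing gem_base, dma_buf_put";
		return "dma_buf_attach, map to sg table, wrap in new GTT bo";
	}

	int main(void)
	{
		printf("self-import: %s\n", prime_import_path(true, true));
		printf("foreign buf: %s\n", prime_import_path(false, false));
		return 0;
	}
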


Property changes on: trunk/sys/dev/drm2/radeon/radeon_prime.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3714 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ *   Kevin E. Martin <martin at xfree86.org>
+ *   Rickard E. Faith <faith at valinux.com>
+ *   Alan Hourihane <alanh at fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ *   Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ *   1999.
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ *   SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!!  NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON.  A FULL AUDIT OF THIS CODE IS NEEDED!  */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_reg.h 261455 2014-02-04 03:36:42Z eadler $");
+
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+#include "evergreen_reg.h"
+#include "ni_reg.h"
+#include "si_reg.h"
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define		RADEON_MC_AGP_START_MASK	0x0000FFFF
+#define		RADEON_MC_AGP_START_SHIFT	0
+#define		RADEON_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_AGP_TOP_SHIFT		16
+#define RADEON_MC_FB_LOCATION		0x0148
+#define		RADEON_MC_FB_START_MASK		0x0000FFFF
+#define		RADEON_MC_FB_START_SHIFT	0
+#define		RADEON_MC_FB_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_FB_TOP_SHIFT		16
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RADEON_AGP_BASE			0x0170
+
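A self-contained example of unpacking a register with the mask/shift pairs above; MC_FB_LOCATION holds bits [31:16] of the framebuffer start and top addresses in its two 16-bit halves (readback value hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define RADEON_MC_FB_START_MASK		0x0000FFFF
	#define RADEON_MC_FB_START_SHIFT	0
	#define RADEON_MC_FB_TOP_MASK		0xFFFF0000
	#define RADEON_MC_FB_TOP_SHIFT		16

	int main(void)
	{
		uint32_t loc = 0x00FF0000;	/* hypothetical readback */
		uint32_t start, top;

		start = (loc & RADEON_MC_FB_START_MASK) >> RADEON_MC_FB_START_SHIFT;
		top   = (loc & RADEON_MC_FB_TOP_MASK) >> RADEON_MC_FB_TOP_SHIFT;
		printf("fb start: 0x%x, fb top: 0x%x\n", start, top);
		return 0;
	}
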
+#define ATI_DATATYPE_VQ				0
+#define ATI_DATATYPE_CI4			1
+#define ATI_DATATYPE_CI8			2
+#define ATI_DATATYPE_ARGB1555			3
+#define ATI_DATATYPE_RGB565			4
+#define ATI_DATATYPE_RGB888			5
+#define ATI_DATATYPE_ARGB8888			6
+#define ATI_DATATYPE_RGB332			7
+#define ATI_DATATYPE_Y8				8
+#define ATI_DATATYPE_RGB8			9
+#define ATI_DATATYPE_CI16			10
+#define ATI_DATATYPE_VYUY_422			11
+#define ATI_DATATYPE_YVYU_422			12
+#define ATI_DATATYPE_AYUV_444			14
+#define ATI_DATATYPE_ARGB4444			15
+
+				/* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID                   0x0f2c /* PCI */
+#define RADEON_AGP_BASE                     0x0170
+#define RADEON_AGP_CNTL                     0x0174
+#       define RADEON_AGP_APER_SIZE_256MB   (0x00 << 0)
+#       define RADEON_AGP_APER_SIZE_128MB   (0x20 << 0)
+#       define RADEON_AGP_APER_SIZE_64MB    (0x30 << 0)
+#       define RADEON_AGP_APER_SIZE_32MB    (0x38 << 0)
+#       define RADEON_AGP_APER_SIZE_16MB    (0x3c << 0)
+#       define RADEON_AGP_APER_SIZE_8MB     (0x3e << 0)
+#       define RADEON_AGP_APER_SIZE_4MB     (0x3f << 0)
+#       define RADEON_AGP_APER_SIZE_MASK    (0x3f << 0)
+#define RADEON_STATUS_PCI_CONFIG            0x06
+#       define RADEON_CAP_LIST              0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG  0x34 /* offset in PCI config*/
+#       define RADEON_CAP_PTR_MASK          0xfc /* mask off reserved bits of CAP_PTR */
+#       define RADEON_CAP_ID_NULL           0x00 /* End of capability list */
+#       define RADEON_CAP_ID_AGP            0x02 /* AGP capability ID */
+#       define RADEON_CAP_ID_EXP            0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND                  0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG       0x0060 /* offset in PCI config*/
+#       define RADEON_AGP_ENABLE            (1<<8)
+#define RADEON_AGP_PLL_CNTL                 0x000b /* PLL */
+#define RADEON_AGP_STATUS                   0x0f5c /* PCI */
+#       define RADEON_AGP_1X_MODE           0x01
+#       define RADEON_AGP_2X_MODE           0x02
+#       define RADEON_AGP_4X_MODE           0x04
+#       define RADEON_AGP_FW_MODE           0x10
+#       define RADEON_AGP_MODE_MASK         0x17
+#       define RADEON_AGPv3_MODE            0x08
+#       define RADEON_AGPv3_4X_MODE         0x01
+#       define RADEON_AGPv3_8X_MODE         0x02
+#define RADEON_ATTRDR                       0x03c1 /* VGA */
+#define RADEON_ATTRDW                       0x03c0 /* VGA */
+#define RADEON_ATTRX                        0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL                  0x1660
+#       define RADEON_AUX1_SC_EN            (1 << 0)
+#       define RADEON_AUX1_SC_MODE_OR       (0 << 1)
+#       define RADEON_AUX1_SC_MODE_NAND     (1 << 1)
+#       define RADEON_AUX2_SC_EN            (1 << 2)
+#       define RADEON_AUX2_SC_MODE_OR       (0 << 3)
+#       define RADEON_AUX2_SC_MODE_NAND     (1 << 3)
+#       define RADEON_AUX3_SC_EN            (1 << 4)
+#       define RADEON_AUX3_SC_MODE_OR       (0 << 5)
+#       define RADEON_AUX3_SC_MODE_NAND     (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM               0x1670
+#define RADEON_AUX1_SC_LEFT                 0x1664
+#define RADEON_AUX1_SC_RIGHT                0x1668
+#define RADEON_AUX1_SC_TOP                  0x166c
+#define RADEON_AUX2_SC_BOTTOM               0x1680
+#define RADEON_AUX2_SC_LEFT                 0x1674
+#define RADEON_AUX2_SC_RIGHT                0x1678
+#define RADEON_AUX2_SC_TOP                  0x167c
+#define RADEON_AUX3_SC_BOTTOM               0x1690
+#define RADEON_AUX3_SC_LEFT                 0x1684
+#define RADEON_AUX3_SC_RIGHT                0x1688
+#define RADEON_AUX3_SC_TOP                  0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL         0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL         0x02dc
+
+#define RADEON_BASE_CODE                    0x0f0b
+#define RADEON_BIOS_0_SCRATCH               0x0010
+#       define RADEON_FP_PANEL_SCALABLE     (1 << 16)
+#       define RADEON_FP_PANEL_SCALE_EN     (1 << 17)
+#       define RADEON_FP_CHIP_SCALE_EN      (1 << 18)
+#       define RADEON_DRIVER_BRIGHTNESS_EN  (1 << 26)
+#       define RADEON_DISPLAY_ROT_MASK      (3 << 28)
+#       define RADEON_DISPLAY_ROT_00        (0 << 28)
+#       define RADEON_DISPLAY_ROT_90        (1 << 28)
+#       define RADEON_DISPLAY_ROT_180       (2 << 28)
+#       define RADEON_DISPLAY_ROT_270       (3 << 28)
+#define RADEON_BIOS_1_SCRATCH               0x0014
+#define RADEON_BIOS_2_SCRATCH               0x0018
+#define RADEON_BIOS_3_SCRATCH               0x001c
+#define RADEON_BIOS_4_SCRATCH               0x0020
+#       define RADEON_CRT1_ATTACHED_MASK    (3 << 0)
+#       define RADEON_CRT1_ATTACHED_MONO    (1 << 0)
+#       define RADEON_CRT1_ATTACHED_COLOR   (2 << 0)
+#       define RADEON_LCD1_ATTACHED         (1 << 2)
+#       define RADEON_DFP1_ATTACHED         (1 << 3)
+#       define RADEON_TV1_ATTACHED_MASK     (3 << 4)
+#       define RADEON_TV1_ATTACHED_COMP     (1 << 4)
+#       define RADEON_TV1_ATTACHED_SVIDEO   (2 << 4)
+#       define RADEON_CRT2_ATTACHED_MASK    (3 << 8)
+#       define RADEON_CRT2_ATTACHED_MONO    (1 << 8)
+#       define RADEON_CRT2_ATTACHED_COLOR   (2 << 8)
+#       define RADEON_DFP2_ATTACHED         (1 << 11)
+#define RADEON_BIOS_5_SCRATCH               0x0024
+#       define RADEON_LCD1_ON               (1 << 0)
+#       define RADEON_CRT1_ON               (1 << 1)
+#       define RADEON_TV1_ON                (1 << 2)
+#       define RADEON_DFP1_ON               (1 << 3)
+#       define RADEON_CRT2_ON               (1 << 5)
+#       define RADEON_CV1_ON                (1 << 6)
+#       define RADEON_DFP2_ON               (1 << 7)
+#       define RADEON_LCD1_CRTC_MASK        (1 << 8)
+#       define RADEON_LCD1_CRTC_SHIFT       8
+#       define RADEON_CRT1_CRTC_MASK        (1 << 9)
+#       define RADEON_CRT1_CRTC_SHIFT       9
+#       define RADEON_TV1_CRTC_MASK         (1 << 10)
+#       define RADEON_TV1_CRTC_SHIFT        10
+#       define RADEON_DFP1_CRTC_MASK        (1 << 11)
+#       define RADEON_DFP1_CRTC_SHIFT       11
+#       define RADEON_CRT2_CRTC_MASK        (1 << 12)
+#       define RADEON_CRT2_CRTC_SHIFT       12
+#       define RADEON_CV1_CRTC_MASK         (1 << 13)
+#       define RADEON_CV1_CRTC_SHIFT        13
+#       define RADEON_DFP2_CRTC_MASK        (1 << 14)
+#       define RADEON_DFP2_CRTC_SHIFT       14
+#       define RADEON_ACC_REQ_LCD1          (1 << 16)
+#       define RADEON_ACC_REQ_CRT1          (1 << 17)
+#       define RADEON_ACC_REQ_TV1           (1 << 18)
+#       define RADEON_ACC_REQ_DFP1          (1 << 19)
+#       define RADEON_ACC_REQ_CRT2          (1 << 21)
+#       define RADEON_ACC_REQ_TV2           (1 << 22)
+#       define RADEON_ACC_REQ_DFP2          (1 << 23)
+#define RADEON_BIOS_6_SCRATCH               0x0028
+#       define RADEON_ACC_MODE_CHANGE       (1 << 2)
+#       define RADEON_EXT_DESKTOP_MODE      (1 << 3)
+#       define RADEON_LCD_DPMS_ON           (1 << 20)
+#       define RADEON_CRT_DPMS_ON           (1 << 21)
+#       define RADEON_TV_DPMS_ON            (1 << 22)
+#       define RADEON_DFP_DPMS_ON           (1 << 23)
+#       define RADEON_DPMS_MASK             (3 << 24)
+#       define RADEON_DPMS_ON               (0 << 24)
+#       define RADEON_DPMS_STANDBY          (1 << 24)
+#       define RADEON_DPMS_SUSPEND          (2 << 24)
+#       define RADEON_DPMS_OFF              (3 << 24)
+#       define RADEON_SCREEN_BLANKING       (1 << 26)
+#       define RADEON_DRIVER_CRITICAL       (1 << 27)
+#       define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH               0x002c
+#       define RADEON_SYS_HOTKEY            (1 << 10)
+#       define RADEON_DRV_LOADED            (1 << 12)
+#define RADEON_BIOS_ROM                     0x0f30 /* PCI */
+#define RADEON_BIST                         0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0                  0x1480
+#define RADEON_BRUSH_DATA1                  0x1484
+#define RADEON_BRUSH_DATA10                 0x14a8
+#define RADEON_BRUSH_DATA11                 0x14ac
+#define RADEON_BRUSH_DATA12                 0x14b0
+#define RADEON_BRUSH_DATA13                 0x14b4
+#define RADEON_BRUSH_DATA14                 0x14b8
+#define RADEON_BRUSH_DATA15                 0x14bc
+#define RADEON_BRUSH_DATA16                 0x14c0
+#define RADEON_BRUSH_DATA17                 0x14c4
+#define RADEON_BRUSH_DATA18                 0x14c8
+#define RADEON_BRUSH_DATA19                 0x14cc
+#define RADEON_BRUSH_DATA2                  0x1488
+#define RADEON_BRUSH_DATA20                 0x14d0
+#define RADEON_BRUSH_DATA21                 0x14d4
+#define RADEON_BRUSH_DATA22                 0x14d8
+#define RADEON_BRUSH_DATA23                 0x14dc
+#define RADEON_BRUSH_DATA24                 0x14e0
+#define RADEON_BRUSH_DATA25                 0x14e4
+#define RADEON_BRUSH_DATA26                 0x14e8
+#define RADEON_BRUSH_DATA27                 0x14ec
+#define RADEON_BRUSH_DATA28                 0x14f0
+#define RADEON_BRUSH_DATA29                 0x14f4
+#define RADEON_BRUSH_DATA3                  0x148c
+#define RADEON_BRUSH_DATA30                 0x14f8
+#define RADEON_BRUSH_DATA31                 0x14fc
+#define RADEON_BRUSH_DATA32                 0x1500
+#define RADEON_BRUSH_DATA33                 0x1504
+#define RADEON_BRUSH_DATA34                 0x1508
+#define RADEON_BRUSH_DATA35                 0x150c
+#define RADEON_BRUSH_DATA36                 0x1510
+#define RADEON_BRUSH_DATA37                 0x1514
+#define RADEON_BRUSH_DATA38                 0x1518
+#define RADEON_BRUSH_DATA39                 0x151c
+#define RADEON_BRUSH_DATA4                  0x1490
+#define RADEON_BRUSH_DATA40                 0x1520
+#define RADEON_BRUSH_DATA41                 0x1524
+#define RADEON_BRUSH_DATA42                 0x1528
+#define RADEON_BRUSH_DATA43                 0x152c
+#define RADEON_BRUSH_DATA44                 0x1530
+#define RADEON_BRUSH_DATA45                 0x1534
+#define RADEON_BRUSH_DATA46                 0x1538
+#define RADEON_BRUSH_DATA47                 0x153c
+#define RADEON_BRUSH_DATA48                 0x1540
+#define RADEON_BRUSH_DATA49                 0x1544
+#define RADEON_BRUSH_DATA5                  0x1494
+#define RADEON_BRUSH_DATA50                 0x1548
+#define RADEON_BRUSH_DATA51                 0x154c
+#define RADEON_BRUSH_DATA52                 0x1550
+#define RADEON_BRUSH_DATA53                 0x1554
+#define RADEON_BRUSH_DATA54                 0x1558
+#define RADEON_BRUSH_DATA55                 0x155c
+#define RADEON_BRUSH_DATA56                 0x1560
+#define RADEON_BRUSH_DATA57                 0x1564
+#define RADEON_BRUSH_DATA58                 0x1568
+#define RADEON_BRUSH_DATA59                 0x156c
+#define RADEON_BRUSH_DATA6                  0x1498
+#define RADEON_BRUSH_DATA60                 0x1570
+#define RADEON_BRUSH_DATA61                 0x1574
+#define RADEON_BRUSH_DATA62                 0x1578
+#define RADEON_BRUSH_DATA63                 0x157c
+#define RADEON_BRUSH_DATA7                  0x149c
+#define RADEON_BRUSH_DATA8                  0x14a0
+#define RADEON_BRUSH_DATA9                  0x14a4
+#define RADEON_BRUSH_SCALE                  0x1470
+#define RADEON_BRUSH_Y_X                    0x1474
+#define RADEON_BUS_CNTL                     0x0030
+#       define RADEON_BUS_MASTER_DIS         (1 << 6)
+#       define RADEON_BUS_BIOS_DIS_ROM       (1 << 12)
+#	define RS600_BUS_MASTER_DIS	     (1 << 14)
+#	define RS600_MSI_REARM		     (1 << 20) /* rs600/rs690/rs740 */
+#       define RADEON_BUS_RD_DISCARD_EN      (1 << 24)
+#       define RADEON_BUS_RD_ABORT_EN        (1 << 25)
+#       define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+#       define RADEON_BUS_WRT_BURST          (1 << 29)
+#       define RADEON_BUS_READ_BURST         (1 << 30)
+#define RADEON_BUS_CNTL1                    0x0034
+#       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN		    0x0160
+#	define RV370_MSI_REARM_EN	     (1 << 0)
+
+/* #define RADEON_PCIE_INDEX                   0x0030 */
+/* #define RADEON_PCIE_DATA                    0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL             0xa2 /* PCIE */
+#       define RADEON_PCIE_LC_LINK_WIDTH_SHIFT     0
+#       define RADEON_PCIE_LC_LINK_WIDTH_MASK      0x7
+#       define RADEON_PCIE_LC_LINK_WIDTH_X0        0
+#       define RADEON_PCIE_LC_LINK_WIDTH_X1        1
+#       define RADEON_PCIE_LC_LINK_WIDTH_X2        2
+#       define RADEON_PCIE_LC_LINK_WIDTH_X4        3
+#       define RADEON_PCIE_LC_LINK_WIDTH_X8        4
+#       define RADEON_PCIE_LC_LINK_WIDTH_X12       5
+#       define RADEON_PCIE_LC_LINK_WIDTH_X16       6
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT  4
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK   0x70
+#       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
+#       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
+#       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+#       define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE   (1 << 7)
+#       define R600_PCIE_LC_RENEGOTIATION_SUPPORT  (1 << 9)
+#       define R600_PCIE_LC_RENEGOTIATE_EN         (1 << 10)
+#       define R600_PCIE_LC_SHORT_RECONFIG_EN      (1 << 11)
+#       define R600_PCIE_LC_UPCONFIGURE_SUPPORT    (1 << 12)
+#       define R600_PCIE_LC_UPCONFIGURE_DIS        (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX      0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX      0x66c
+
+#define RADEON_CACHE_CNTL                   0x1724
+#define RADEON_CACHE_LINE                   0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID              0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR             0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL                 0x0001 /* PLL */
+#       define RADEON_DONT_USE_XTALIN       (1 << 4)
+#       define RADEON_SCLK_DYN_START_CNTL   (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA              0x000c
+#define RADEON_CLOCK_CNTL_INDEX             0x0008
+#       define RADEON_PLL_WR_EN             (1 << 7)
+#       define RADEON_PLL_DIV_SEL           (3 << 8)
+#       define RADEON_PLL2_DIV_SEL_MASK     (~(3 << 8))
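+
+/*
+ * Illustrative sketch, not part of the imported header: PLL registers
+ * (those marked "PLL" in this file) sit behind this index/data pair, and a
+ * write must also raise RADEON_PLL_WR_EN in the index byte.  WREG8(),
+ * RREG32() and WREG32() stand in for the driver's MMIO accessors and are
+ * assumptions here.
+ */
+#if 0
+static inline uint32_t radeon_pll_read(uint32_t index)
+{
+	WREG8(RADEON_CLOCK_CNTL_INDEX, index & 0x3f);
+	return RREG32(RADEON_CLOCK_CNTL_DATA);
+}
+
+static inline void radeon_pll_write(uint32_t index, uint32_t data)
+{
+	WREG8(RADEON_CLOCK_CNTL_INDEX, (index & 0x3f) | RADEON_PLL_WR_EN);
+	WREG32(RADEON_CLOCK_CNTL_DATA, data);
+}
+#endif
+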
+#define RADEON_CLK_PWRMGT_CNTL              0x0014
+#       define RADEON_ENGIN_DYNCLK_MODE     (1 << 12)
+#       define RADEON_ACTIVE_HILO_LAT_MASK  (3 << 13)
+#       define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+#       define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+#       define RADEON_MC_BUSY               (1 << 16)
+#       define RADEON_DLL_READY             (1 << 19)
+#       define RADEON_CG_NO1_DEBUG_0        (1 << 24)
+#       define RADEON_CG_NO1_DEBUG_MASK     (0x1f << 24)
+#       define RADEON_DYN_STOP_MODE_MASK    (7 << 21)
+#       define RADEON_TVPLL_PWRMGT_OFF      (1 << 30)
+#       define RADEON_TVCLK_TURNOFF         (1U << 31)
+#define RADEON_PLL_PWRMGT_CNTL              0x0015 /* PLL */
+#	define RADEON_PM_MODE_SEL           (1 << 13)
+#       define RADEON_TCL_BYPASS_DISABLE    (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D               0x1a24
+#define RADEON_CLR_CMP_CLR_DST              0x15c8
+#define RADEON_CLR_CMP_CLR_SRC              0x15c4
+#define RADEON_CLR_CMP_CNTL                 0x15c0
+#       define RADEON_SRC_CMP_EQ_COLOR      (4 <<  0)
+#       define RADEON_SRC_CMP_NEQ_COLOR     (5 <<  0)
+#       define RADEON_CLR_CMP_SRC_SOURCE    (1 << 24)
+#define RADEON_CLR_CMP_MASK                 0x15cc
+#       define RADEON_CLR_CMP_MSK           0xffffffff
+#define RADEON_CLR_CMP_MASK_3D              0x1A28
+#define RADEON_COMMAND                      0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID          0x1a0c
+#define RADEON_CONFIG_APER_0_BASE           0x0100
+#define RADEON_CONFIG_APER_1_BASE           0x0104
+#define RADEON_CONFIG_APER_SIZE             0x0108
+#define RADEON_CONFIG_BONDS                 0x00e8
+#define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
+#       define RADEON_CFG_ATI_REV_A11       (0   << 16)
+#       define RADEON_CFG_ATI_REV_A12       (1   << 16)
+#       define RADEON_CFG_ATI_REV_A13       (2   << 16)
+#       define RADEON_CFG_ATI_REV_ID_MASK   (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE               0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED      0x0114
+#define RADEON_CONFIG_REG_1_BASE            0x010c
+#define RADEON_CONFIG_REG_APER_SIZE         0x0110
+#define RADEON_CONFIG_XSTRAP                0x00e4
+#define RADEON_CONSTANT_COLOR_C             0x1d34
+#       define RADEON_CONSTANT_COLOR_MASK   0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ONE    0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ZERO   0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR             0x0740
+#define RADEON_CRC_CMDFIFO_DOUT             0x0744
+#define RADEON_GRPH_BUFFER_CNTL             0x02f0
+#       define RADEON_GRPH_START_REQ_MASK          (0x7f)
+#       define RADEON_GRPH_START_REQ_SHIFT         0
+#       define RADEON_GRPH_STOP_REQ_MASK           (0x7f<<8)
+#       define RADEON_GRPH_STOP_REQ_SHIFT          8
+#       define RADEON_GRPH_CRITICAL_POINT_MASK     (0x7f<<16)
+#       define RADEON_GRPH_CRITICAL_POINT_SHIFT    16
+#       define RADEON_GRPH_CRITICAL_CNTL           (1<<28)
+#       define RADEON_GRPH_BUFFER_SIZE             (1<<29)
+#       define RADEON_GRPH_CRITICAL_AT_SOF         (1<<30)
+#       define RADEON_GRPH_STOP_CNTL               (1U<<31)
+#define RADEON_GRPH2_BUFFER_CNTL            0x03f0
+#       define RADEON_GRPH2_START_REQ_MASK         (0x7f)
+#       define RADEON_GRPH2_START_REQ_SHIFT         0
+#       define RADEON_GRPH2_STOP_REQ_MASK          (0x7f<<8)
+#       define RADEON_GRPH2_STOP_REQ_SHIFT         8
+#       define RADEON_GRPH2_CRITICAL_POINT_MASK    (0x7f<<16)
+#       define RADEON_GRPH2_CRITICAL_POINT_SHIFT   16
+#       define RADEON_GRPH2_CRITICAL_CNTL          (1<<28)
+#       define RADEON_GRPH2_BUFFER_SIZE            (1<<29)
+#       define RADEON_GRPH2_CRITICAL_AT_SOF        (1<<30)
+#       define RADEON_GRPH2_STOP_CNTL              (1U<<31)
+#define RADEON_CRTC_CRNT_FRAME              0x0214
+#define RADEON_CRTC_EXT_CNTL                0x0054
+#       define RADEON_CRTC_VGA_XOVERSCAN    (1 <<  0)
+#       define RADEON_VGA_ATI_LINEAR        (1 <<  3)
+#       define RADEON_XCRT_CNT_EN           (1 <<  6)
+#       define RADEON_CRTC_HSYNC_DIS        (1 <<  8)
+#       define RADEON_CRTC_VSYNC_DIS        (1 <<  9)
+#       define RADEON_CRTC_DISPLAY_DIS      (1 << 10)
+#       define RADEON_CRTC_SYNC_TRISTAT     (1 << 11)
+#       define RADEON_CRTC_CRT_ON           (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE      0x0055
+#       define RADEON_CRTC_HSYNC_DIS_BYTE   (1 <<  0)
+#       define RADEON_CRTC_VSYNC_DIS_BYTE   (1 <<  1)
+#       define RADEON_CRTC_DISPLAY_DIS_BYTE (1 <<  2)
+#define RADEON_CRTC_GEN_CNTL                0x0050
+#       define RADEON_CRTC_DBL_SCAN_EN      (1 <<  0)
+#       define RADEON_CRTC_INTERLACE_EN     (1 <<  1)
+#       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
+#       define RADEON_CRTC_ICON_EN          (1 << 15)
+#       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_VSTAT_MODE_MASK  (3 << 17)
+#       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
+#       define RADEON_CRTC_CUR_MODE_SHIFT   20
+#       define RADEON_CRTC_CUR_MODE_MONO    0
+#       define RADEON_CRTC_CUR_MODE_24BPP   2
+#       define RADEON_CRTC_EXT_DISP_EN      (1 << 24)
+#       define RADEON_CRTC_EN               (1 << 25)
+#       define RADEON_CRTC_DISP_REQ_EN_B    (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL               0x03f8
+#       define RADEON_CRTC2_DBL_SCAN_EN     (1 <<  0)
+#       define RADEON_CRTC2_INTERLACE_EN    (1 <<  1)
+#       define RADEON_CRTC2_SYNC_TRISTAT    (1 <<  4)
+#       define RADEON_CRTC2_HSYNC_TRISTAT   (1 <<  5)
+#       define RADEON_CRTC2_VSYNC_TRISTAT   (1 <<  6)
+#       define RADEON_CRTC2_CRT2_ON         (1 <<  7)
+#       define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+#       define RADEON_CRTC2_PIX_WIDTH_MASK  (0xf << 8)
+#       define RADEON_CRTC2_ICON_EN         (1 << 15)
+#       define RADEON_CRTC2_CUR_EN          (1 << 16)
+#       define RADEON_CRTC2_CUR_MODE_MASK   (7 << 20)
+#       define RADEON_CRTC2_DISP_DIS        (1 << 23)
+#       define RADEON_CRTC2_EN              (1 << 25)
+#       define RADEON_CRTC2_DISP_REQ_EN_B   (1 << 26)
+#       define RADEON_CRTC2_CSYNC_EN        (1 << 27)
+#       define RADEON_CRTC2_HSYNC_DIS       (1 << 28)
+#       define RADEON_CRTC2_VSYNC_DIS       (1 << 29)
+#define RADEON_CRTC_MORE_CNTL               0x27c
+#       define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+#       define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+#       define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+#       define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE          0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID         0x0204
+#       define RADEON_CRTC_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID        0x0304
+#       define RADEON_CRTC2_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC2_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC2_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC2_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC_H_TOTAL_DISP            0x0200
+#       define RADEON_CRTC_H_TOTAL          (0x03ff << 0)
+#       define RADEON_CRTC_H_TOTAL_SHIFT    0
+#       define RADEON_CRTC_H_DISP           (0x01ff << 16)
+#       define RADEON_CRTC_H_DISP_SHIFT     16
+#define RADEON_CRTC2_H_TOTAL_DISP           0x0300
+#       define RADEON_CRTC2_H_TOTAL         (0x03ff << 0)
+#       define RADEON_CRTC2_H_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_H_DISP          (0x01ff << 16)
+#       define RADEON_CRTC2_H_DISP_SHIFT    16
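+
+/*
+ * Illustrative sketch, not part of the imported header: the horizontal
+ * total/display fields above are programmed in character clocks (units of
+ * 8 pixels) minus one.  A plausible packing helper, taking the mode's
+ * htotal/hdisp pixel counts:
+ */
+#if 0
+static inline uint32_t radeon_pack_h_total_disp(uint32_t htotal, uint32_t hdisp)
+{
+	return ((((htotal / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT) & RADEON_CRTC_H_TOTAL) |
+	       ((((hdisp / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) & RADEON_CRTC_H_DISP);
+}
+#endif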
+
+#define RADEON_CRTC_OFFSET_RIGHT	    0x0220
+#define RADEON_CRTC_OFFSET                  0x0224
+#	define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC_OFFSET__OFFSET_LOCK	   (1U<<31)
+
+#define RADEON_CRTC2_OFFSET                 0x0324
+#	define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC2_OFFSET__OFFSET_LOCK	    (1U<<31)
+#define RADEON_CRTC_OFFSET_CNTL             0x0228
+#       define RADEON_CRTC_TILE_LINE_SHIFT              0
+#       define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT        4
+#	define R300_CRTC_X_Y_MODE_EN_RIGHT		(1 << 6)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK   (3 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO   (0 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS    (3 << 7)
+#	define R300_CRTC_X_Y_MODE_EN			(1 << 9)
+#	define R300_CRTC_MICRO_TILE_BUFFER_MASK		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_AUTO		(0 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_SINGLE	(1 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE	(2 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DIS		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_EN_RIGHT		(1 << 12)
+#	define R300_CRTC_MICRO_TILE_EN			(1 << 13)
+#	define R300_CRTC_MACRO_TILE_EN_RIGHT		(1 << 14)
+#       define R300_CRTC_MACRO_TILE_EN                  (1 << 15)
+#       define RADEON_CRTC_TILE_EN_RIGHT                (1 << 14)
+#       define RADEON_CRTC_TILE_EN                      (1 << 15)
+#       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
+#       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN      (1 << 28)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN     (1 << 29)
+
+#define R300_CRTC_TILE_X0_Y0	            0x0350
+#define R300_CRTC2_TILE_X0_Y0	            0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL            0x0328
+#       define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+#       define RADEON_CRTC2_TILE_EN         (1 << 15)
+#define RADEON_CRTC_PITCH                   0x022c
+#	define RADEON_CRTC_PITCH__SHIFT		 0
+#	define RADEON_CRTC_PITCH__RIGHT_SHIFT	16
+
+#define RADEON_CRTC2_PITCH                  0x032c
+#define RADEON_CRTC_STATUS                  0x005c
+#       define RADEON_CRTC_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC2_STATUS                  0x03fc
+#       define RADEON_CRTC2_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC2_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC2_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC_V_SYNC_STRT_WID         0x020c
+#       define RADEON_CRTC_V_SYNC_STRT        (0x7ff <<  0)
+#       define RADEON_CRTC_V_SYNC_STRT_SHIFT  0
+#       define RADEON_CRTC_V_SYNC_WID         (0x1f  << 16)
+#       define RADEON_CRTC_V_SYNC_WID_SHIFT   16
+#       define RADEON_CRTC_V_SYNC_POL         (1     << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID        0x030c
+#       define RADEON_CRTC2_V_SYNC_STRT       (0x7ff <<  0)
+#       define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+#       define RADEON_CRTC2_V_SYNC_WID        (0x1f  << 16)
+#       define RADEON_CRTC2_V_SYNC_WID_SHIFT  16
+#       define RADEON_CRTC2_V_SYNC_POL        (1     << 23)
+#define RADEON_CRTC_V_TOTAL_DISP            0x0208
+#       define RADEON_CRTC_V_TOTAL          (0x07ff << 0)
+#       define RADEON_CRTC_V_TOTAL_SHIFT    0
+#       define RADEON_CRTC_V_DISP           (0x07ff << 16)
+#       define RADEON_CRTC_V_DISP_SHIFT     16
+#define RADEON_CRTC2_V_TOTAL_DISP           0x0308
+#       define RADEON_CRTC2_V_TOTAL         (0x07ff << 0)
+#       define RADEON_CRTC2_V_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_V_DISP          (0x07ff << 16)
+#       define RADEON_CRTC2_V_DISP_SHIFT    16
+#define RADEON_CRTC_VLINE_CRNT_VLINE        0x0210
+#       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME             0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
+#define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
+#define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0                     0x026c
+#define RADEON_CUR_CLR1                     0x0270
+#define RADEON_CUR_HORZ_VERT_OFF            0x0268
+#define RADEON_CUR_HORZ_VERT_POSN           0x0264
+#define RADEON_CUR_OFFSET                   0x0260
+#       define RADEON_CUR_LOCK              (1U << 31)
+#define RADEON_CUR2_CLR0                    0x036c
+#define RADEON_CUR2_CLR1                    0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF           0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN          0x0364
+#define RADEON_CUR2_OFFSET                  0x0360
+#       define RADEON_CUR2_LOCK             (1U << 31)
+
+#define RADEON_DAC_CNTL                     0x0058
+#       define RADEON_DAC_RANGE_CNTL        (3 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_PS2    (2 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_MASK   0x03
+#       define RADEON_DAC_BLANKING          (1 <<  2)
+#       define RADEON_DAC_CMP_EN            (1 <<  3)
+#       define RADEON_DAC_CMP_OUTPUT        (1 <<  7)
+#       define RADEON_DAC_8BIT_EN           (1 <<  8)
+#       define RADEON_DAC_TVO_EN            (1 << 10)
+#       define RADEON_DAC_VGA_ADR_EN        (1 << 13)
+#       define RADEON_DAC_PDWN              (1 << 15)
+#       define RADEON_DAC_MASK_ALL          (0xff << 24)
+#define RADEON_DAC_CNTL2                    0x007c
+#       define RADEON_DAC2_TV_CLK_SEL       (0 <<  1)
+#       define RADEON_DAC2_DAC_CLK_SEL      (1 <<  0)
+#       define RADEON_DAC2_DAC2_CLK_SEL     (1 <<  1)
+#       define RADEON_DAC2_PALETTE_ACC_CTL  (1 <<  5)
+#       define RADEON_DAC2_CMP_EN           (1 <<  7)
+#       define RADEON_DAC2_CMP_OUT_R        (1 <<  8)
+#       define RADEON_DAC2_CMP_OUT_G        (1 <<  9)
+#       define RADEON_DAC2_CMP_OUT_B        (1 << 10)
+#       define RADEON_DAC2_CMP_OUTPUT       (1 << 11)
+#define RADEON_DAC_EXT_CNTL                 0x0280
+#       define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+#       define RADEON_DAC2_FORCE_DATA_EN      (1 << 1)
+#       define RADEON_DAC_FORCE_BLANK_OFF_EN  (1 << 4)
+#       define RADEON_DAC_FORCE_DATA_EN       (1 << 5)
+#       define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_R    (0 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_G    (1 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_B    (2 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_RGB  (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_MASK   0x0003ff00
+#       define RADEON_DAC_FORCE_DATA_SHIFT  8
+#define RADEON_DAC_MACRO_CNTL               0x0d04
+#       define RADEON_DAC_PDWN_R            (1 << 16)
+#       define RADEON_DAC_PDWN_G            (1 << 17)
+#       define RADEON_DAC_PDWN_B            (1 << 18)
+#define RADEON_DISP_PWR_MAN                 0x0d08
+#       define RADEON_DISP_PWR_MAN_D3_CRTC_EN      (1 << 0)
+#       define RADEON_DISP_PWR_MAN_D3_CRTC2_EN     (1 << 4)
+#       define RADEON_DISP_PWR_MAN_DPMS_ON  (0 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_STANDBY    (1 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_SUSPEND    (2 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+#       define RADEON_DISP_D3_RST           (1 << 16)
+#       define RADEON_DISP_D3_REG_RST       (1 << 17)
+#       define RADEON_DISP_D3_GRPH_RST      (1 << 18)
+#       define RADEON_DISP_D3_SUBPIC_RST    (1 << 19)
+#       define RADEON_DISP_D3_OV0_RST       (1 << 20)
+#       define RADEON_DISP_D1D2_GRPH_RST    (1 << 21)
+#       define RADEON_DISP_D1D2_SUBPIC_RST  (1 << 22)
+#       define RADEON_DISP_D1D2_OV0_RST     (1 << 23)
+#       define RADEON_DIG_TMDS_ENABLE_RST   (1 << 24)
+#       define RADEON_TV_ENABLE_RST         (1 << 25)
+#       define RADEON_AUTO_PWRUP_EN         (1 << 26)
+#define RADEON_TV_DAC_CNTL                  0x088c
+#       define RADEON_TV_DAC_NBLANK         (1 << 0)
+#       define RADEON_TV_DAC_NHOLD          (1 << 1)
+#       define RADEON_TV_DAC_PEDESTAL       (1 <<  2)
+#       define RADEON_TV_MONITOR_DETECT_EN  (1 <<  4)
+#       define RADEON_TV_DAC_CMPOUT         (1 <<  5)
+#       define RADEON_TV_DAC_STD_MASK       (3 <<  8)
+#       define RADEON_TV_DAC_STD_PAL        (0 <<  8)
+#       define RADEON_TV_DAC_STD_NTSC       (1 <<  8)
+#       define RADEON_TV_DAC_STD_PS2        (2 <<  8)
+#       define RADEON_TV_DAC_STD_RS343      (3 <<  8)
+#       define RADEON_TV_DAC_BGSLEEP        (1 <<  6)
+#       define RADEON_TV_DAC_BGADJ_MASK     (0xf <<  16)
+#       define RADEON_TV_DAC_BGADJ_SHIFT    16
+#       define RADEON_TV_DAC_DACADJ_MASK    (0xf <<  20)
+#       define RADEON_TV_DAC_DACADJ_SHIFT   20
+#       define RADEON_TV_DAC_RDACPD         (1 <<  24)
+#       define RADEON_TV_DAC_GDACPD         (1 <<  25)
+#       define RADEON_TV_DAC_BDACPD         (1 <<  26)
+#       define RADEON_TV_DAC_RDACDET        (1 << 29)
+#       define RADEON_TV_DAC_GDACDET        (1 << 30)
+#       define RADEON_TV_DAC_BDACDET        (1U << 31)
+#       define R420_TV_DAC_DACADJ_MASK      (0x1f <<  20)
+#       define R420_TV_DAC_RDACPD           (1 <<  25)
+#       define R420_TV_DAC_GDACPD           (1 <<  26)
+#       define R420_TV_DAC_BDACPD           (1 <<  27)
+#       define R420_TV_DAC_TVENABLE         (1 <<  28)
+#define RADEON_DISP_HW_DEBUG                0x0d14
+#       define RADEON_CRT2_DISP1_SEL        (1 <<  5)
+#define RADEON_DISP_OUTPUT_CNTL             0x0d64
+#       define RADEON_DISP_DAC_SOURCE_MASK  0x03
+#       define RADEON_DISP_DAC2_SOURCE_MASK  0x0c
+#       define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+#       define RADEON_DISP_DAC_SOURCE_RMX   0x02
+#       define RADEON_DISP_DAC_SOURCE_LTU   0x03
+#       define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+#       define RADEON_DISP_TVDAC_SOURCE_MASK  (0x03 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC  0x0
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_RMX   (0x02 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_LTU   (0x03 << 2)
+#       define RADEON_DISP_TRANS_MATRIX_MASK  (0x03 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_GRAPHICS  (0x01 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_VIDEO     (0x02 << 4)
+#       define RADEON_DISP_TV_SOURCE_CRTC   (1 << 16) /* crtc1 or crtc2 */
+#       define RADEON_DISP_TV_SOURCE_LTU    (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL             0x0d6c
+#       define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+#       define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG                  0x02cc
+#define RADEON_DAC_DATA                     0x03c9 /* VGA */
+#define RADEON_DAC_MASK                     0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX                  0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX                  0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG                   0x02e0
+#define RADEON_DDA_ON_OFF                   0x02e4
+#define RADEON_DEFAULT_OFFSET               0x16e0
+#define RADEON_DEFAULT_PITCH                0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT      0x16e8
+#       define RADEON_DEFAULT_SC_RIGHT_MAX  (0x1fff <<  0)
+#       define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL   0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK   0x1824
+#define RADEON_DEVICE_ID                    0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL               0x0d00
+#       define RADEON_SOFT_RESET_GRPH_PP    (1 << 0)
+#define RADEON_DISP_MERGE_CNTL		  0x0d60
+#       define RADEON_DISP_ALPHA_MODE_MASK  0x03
+#       define RADEON_DISP_ALPHA_MODE_KEY   0
+#       define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+#       define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+#       define RADEON_DISP_RGB_OFFSET_EN    (1 << 8)
+#       define RADEON_DISP_GRPH_ALPHA_MASK  (0xff << 16)
+#       define RADEON_DISP_OV0_ALPHA_MASK   (0xff << 24)
+#	define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL		    0x0d68
+#       define RADEON_DISP2_RGB_OFFSET_EN   (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A        0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B        0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C        0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D        0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E        0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F        0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR            0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR            0x147c
+#define RADEON_DP_CNTL                      0x16c0
+#       define RADEON_DST_X_LEFT_TO_RIGHT   (1 <<  0)
+#       define RADEON_DST_Y_TOP_TO_BOTTOM   (1 <<  1)
+#       define RADEON_DP_DST_TILE_LINEAR    (0 <<  3)
+#       define RADEON_DP_DST_TILE_MACRO     (1 <<  3)
+#       define RADEON_DP_DST_TILE_MICRO     (2 <<  3)
+#       define RADEON_DP_DST_TILE_BOTH      (3 <<  3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR     0x16d0
+#       define RADEON_DST_Y_MAJOR             (1 <<  2)
+#       define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+#       define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1U << 31)
+#define RADEON_DP_DATATYPE                  0x16c4
+#       define RADEON_HOST_BIG_ENDIAN_EN    (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL           0x146c
+#       define RADEON_GMC_SRC_PITCH_OFFSET_CNTL   (1    <<  0)
+#       define RADEON_GMC_DST_PITCH_OFFSET_CNTL   (1    <<  1)
+#       define RADEON_GMC_SRC_CLIPPING            (1    <<  2)
+#       define RADEON_GMC_DST_CLIPPING            (1    <<  3)
+#       define RADEON_GMC_BRUSH_DATATYPE_MASK     (0x0f <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_BG    (0    <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_LA    (1    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_BG    (4    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_LA    (5    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_BG   (6    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_LA   (7    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_BG  (8    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_LA  (9    <<  4)
+#       define RADEON_GMC_BRUSH_8x8_COLOR         (10   <<  4)
+#       define RADEON_GMC_BRUSH_1X8_COLOR         (12   <<  4)
+#       define RADEON_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
+#       define RADEON_GMC_BRUSH_NONE              (15   <<  4)
+#       define RADEON_GMC_DST_8BPP_CI             (2    <<  8)
+#       define RADEON_GMC_DST_15BPP               (3    <<  8)
+#       define RADEON_GMC_DST_16BPP               (4    <<  8)
+#       define RADEON_GMC_DST_24BPP               (5    <<  8)
+#       define RADEON_GMC_DST_32BPP               (6    <<  8)
+#       define RADEON_GMC_DST_8BPP_RGB            (7    <<  8)
+#       define RADEON_GMC_DST_Y8                  (8    <<  8)
+#       define RADEON_GMC_DST_RGB8                (9    <<  8)
+#       define RADEON_GMC_DST_VYUY                (11   <<  8)
+#       define RADEON_GMC_DST_YVYU                (12   <<  8)
+#       define RADEON_GMC_DST_AYUV444             (14   <<  8)
+#       define RADEON_GMC_DST_ARGB4444            (15   <<  8)
+#       define RADEON_GMC_DST_DATATYPE_MASK       (0x0f <<  8)
+#       define RADEON_GMC_DST_DATATYPE_SHIFT      8
+#       define RADEON_GMC_SRC_DATATYPE_MASK       (3    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_COLOR      (3    << 12)
+#       define RADEON_GMC_BYTE_PIX_ORDER          (1    << 14)
+#       define RADEON_GMC_BYTE_MSB_TO_LSB         (0    << 14)
+#       define RADEON_GMC_BYTE_LSB_TO_MSB         (1    << 14)
+#       define RADEON_GMC_CONVERSION_TEMP         (1    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_6500    (0    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_9300    (1    << 15)
+#       define RADEON_GMC_ROP3_MASK               (0xff << 16)
+#       define RADEON_DP_SRC_SOURCE_MASK          (7    << 24)
+#       define RADEON_DP_SRC_SOURCE_MEMORY        (2    << 24)
+#       define RADEON_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
+#       define RADEON_GMC_3D_FCN_EN               (1    << 27)
+#       define RADEON_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
+#       define RADEON_GMC_AUX_CLIP_DIS            (1    << 29)
+#       define RADEON_GMC_WR_MSK_DIS              (1    << 30)
+#       define RADEON_GMC_LD_BRUSH_Y_X            (1U   << 31)
+#       define RADEON_ROP3_ZERO             0x00000000
+#       define RADEON_ROP3_DSa              0x00880000
+#       define RADEON_ROP3_SDna             0x00440000
+#       define RADEON_ROP3_S                0x00cc0000
+#       define RADEON_ROP3_DSna             0x00220000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DSx              0x00660000
+#       define RADEON_ROP3_DSo              0x00ee0000
+#       define RADEON_ROP3_DSon             0x00110000
+#       define RADEON_ROP3_DSxn             0x00990000
+#       define RADEON_ROP3_Dn               0x00550000
+#       define RADEON_ROP3_SDno             0x00dd0000
+#       define RADEON_ROP3_Sn               0x00330000
+#       define RADEON_ROP3_DSno             0x00bb0000
+#       define RADEON_ROP3_DSan             0x00770000
+#       define RADEON_ROP3_ONE              0x00ff0000
+#       define RADEON_ROP3_DPa              0x00a00000
+#       define RADEON_ROP3_PDna             0x00500000
+#       define RADEON_ROP3_P                0x00f00000
+#       define RADEON_ROP3_DPna             0x000a0000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DPx              0x005a0000
+#       define RADEON_ROP3_DPo              0x00fa0000
+#       define RADEON_ROP3_DPon             0x00050000
+#       define RADEON_ROP3_PDxn             0x00a50000
+#       define RADEON_ROP3_PDno             0x00f50000
+#       define RADEON_ROP3_Pn               0x000f0000
+#       define RADEON_ROP3_DPno             0x00af0000
+#       define RADEON_ROP3_DPan             0x005f0000
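+
+/*
+ * The ROP3 names above follow the usual reverse-Polish raster-op
+ * convention: D = destination, S = source, P = pattern (brush), with
+ * a/o/x/n standing for AND, OR, XOR and NOT; thus ROP3_S (0xcc) is a
+ * straight source copy and ROP3_P (0xf0) paints with the brush.
+ * Illustrative sketch, not part of the imported header, of a solid-fill
+ * GUI master control word built from these bits:
+ */
+#if 0
+#define RADEON_SOLID_FILL_32BPP_GMC		\
+	(RADEON_GMC_DST_PITCH_OFFSET_CNTL |	\
+	 RADEON_GMC_BRUSH_SOLID_COLOR |		\
+	 RADEON_GMC_DST_32BPP |			\
+	 RADEON_GMC_SRC_DATATYPE_COLOR |	\
+	 RADEON_ROP3_P |			\
+	 RADEON_GMC_CLR_CMP_CNTL_DIS)
+#endif
+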
+#define RADEON_DP_GUI_MASTER_CNTL_C         0x1c84
+#define RADEON_DP_MIX                       0x16c8
+#define RADEON_DP_SRC_BKGD_CLR              0x15dc
+#define RADEON_DP_SRC_FRGD_CLR              0x15d8
+#define RADEON_DP_WRITE_MASK                0x16cc
+#define RADEON_DST_BRES_DEC                 0x1630
+#define RADEON_DST_BRES_ERR                 0x1628
+#define RADEON_DST_BRES_INC                 0x162c
+#define RADEON_DST_BRES_LNTH                0x1634
+#define RADEON_DST_BRES_LNTH_SUB            0x1638
+#define RADEON_DST_HEIGHT                   0x1410
+#define RADEON_DST_HEIGHT_WIDTH             0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8           0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW          0x15b4
+#define RADEON_DST_HEIGHT_Y                 0x15a0
+#define RADEON_DST_LINE_START               0x1600
+#define RADEON_DST_LINE_END                 0x1604
+#define RADEON_DST_LINE_PATCOUNT            0x1608
+#       define RADEON_BRES_CNTL_SHIFT       8
+#define RADEON_DST_OFFSET                   0x1404
+#define RADEON_DST_PITCH                    0x1408
+#define RADEON_DST_PITCH_OFFSET             0x142c
+#define RADEON_DST_PITCH_OFFSET_C           0x1c80
+#       define RADEON_PITCH_SHIFT           21
+#       define RADEON_DST_TILE_LINEAR       (0 << 30)
+#       define RADEON_DST_TILE_MACRO        (1 << 30)
+#       define RADEON_DST_TILE_MICRO        (2U << 30)
+#       define RADEON_DST_TILE_BOTH         (3U << 30)
+#define RADEON_DST_WIDTH                    0x140c
+#define RADEON_DST_WIDTH_HEIGHT             0x1598
+#define RADEON_DST_WIDTH_X                  0x1588
+#define RADEON_DST_WIDTH_X_INCY             0x159c
+#define RADEON_DST_X                        0x141c
+#define RADEON_DST_X_SUB                    0x15a4
+#define RADEON_DST_X_Y                      0x1594
+#define RADEON_DST_Y                        0x1420
+#define RADEON_DST_Y_SUB                    0x15a8
+#define RADEON_DST_Y_X                      0x1438
+
+#define RADEON_FCP_CNTL                     0x0910
+#      define RADEON_FCP0_SRC_PCICLK             0
+#      define RADEON_FCP0_SRC_PCLK               1
+#      define RADEON_FCP0_SRC_PCLKb              2
+#      define RADEON_FCP0_SRC_HREF               3
+#      define RADEON_FCP0_SRC_GND                4
+#      define RADEON_FCP0_SRC_HREFb              5
+#define RADEON_FLUSH_1                      0x1704
+#define RADEON_FLUSH_2                      0x1708
+#define RADEON_FLUSH_3                      0x170c
+#define RADEON_FLUSH_4                      0x1710
+#define RADEON_FLUSH_5                      0x1714
+#define RADEON_FLUSH_6                      0x1718
+#define RADEON_FLUSH_7                      0x171c
+#define RADEON_FOG_3D_TABLE_START           0x1810
+#define RADEON_FOG_3D_TABLE_END             0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY         0x181c
+#define RADEON_FOG_TABLE_INDEX              0x1a14
+#define RADEON_FOG_TABLE_DATA               0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP         0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP         0x0254
+#       define RADEON_FP_CRTC_H_TOTAL_MASK      0x000003ff
+#       define RADEON_FP_CRTC_H_DISP_MASK       0x01ff0000
+#       define RADEON_FP_CRTC_V_TOTAL_MASK      0x00000fff
+#       define RADEON_FP_CRTC_V_DISP_MASK       0x0fff0000
+#       define RADEON_FP_H_SYNC_STRT_CHAR_MASK  0x00001ff8
+#       define RADEON_FP_H_SYNC_WID_MASK        0x003f0000
+#       define RADEON_FP_V_SYNC_STRT_MASK       0x00000fff
+#       define RADEON_FP_V_SYNC_WID_MASK        0x001f0000
+#       define RADEON_FP_CRTC_H_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_H_DISP_SHIFT      0x00000010
+#       define RADEON_FP_CRTC_V_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_V_DISP_SHIFT      0x00000010
+#       define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+#       define RADEON_FP_H_SYNC_WID_SHIFT       0x00000010
+#       define RADEON_FP_V_SYNC_STRT_SHIFT      0x00000000
+#       define RADEON_FP_V_SYNC_WID_SHIFT       0x00000010
+#define RADEON_FP_GEN_CNTL                  0x0284
+#       define RADEON_FP_FPON                  (1 <<  0)
+#       define RADEON_FP_BLANK_EN              (1 <<  1)
+#       define RADEON_FP_TMDS_EN               (1 <<  2)
+#       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
+#       define RADEON_FP_EN_TMDS               (1 <<  7)
+#       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
+#       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
+#       define R200_FP_SOURCE_SEL_RMX          (2 <<  10)
+#       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
+#       define RADEON_FP_SEL_CRTC1             (0 << 13)
+#       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define R300_HPD_SEL(x)                 ((x) << 13)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+#       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+#       define RADEON_FP_CRTC_USE_SHADOW_VEND  (1 << 18)
+#       define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+#       define RADEON_FP_DFP_SYNC_SEL          (1 << 21)
+#       define RADEON_FP_CRTC_LOCK_8DOT        (1 << 22)
+#       define RADEON_FP_CRT_SYNC_SEL          (1 << 23)
+#       define RADEON_FP_USE_SHADOW_EN         (1 << 24)
+#       define RADEON_FP_CRT_SYNC_ALT          (1 << 26)
+#define RADEON_FP2_GEN_CNTL                 0x0288
+#       define RADEON_FP2_BLANK_EN             (1 <<  1)
+#       define RADEON_FP2_ON                   (1 <<  2)
+#       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
+#       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
+#       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define R200_FP2_SOURCE_SEL_RMX         (2 << 10)
+#       define R200_FP2_SOURCE_SEL_TRANS_UNIT  (3 << 10)
+#       define RADEON_FP2_SRC_SEL_MASK         (3 << 13)
+#       define RADEON_FP2_SRC_SEL_CRTC2        (1 << 13)
+#       define RADEON_FP2_FP_POL               (1 << 16)
+#       define RADEON_FP2_LP_POL               (1 << 17)
+#       define RADEON_FP2_SCK_POL              (1 << 18)
+#       define RADEON_FP2_LCD_CNTL_MASK        (7 << 19)
+#       define RADEON_FP2_PAD_FLOP_EN          (1 << 22)
+#       define RADEON_FP2_CRC_EN               (1 << 23)
+#       define RADEON_FP2_CRC_READ_EN          (1 << 24)
+#       define RADEON_FP2_DVO_EN               (1 << 25)
+#       define RADEON_FP2_DVO_RATE_SEL_SDR     (1 << 26)
+#       define R200_FP2_DVO_RATE_SEL_SDR       (1 << 27)
+#       define R300_FP2_DVO_CLOCK_MODE_SINGLE  (1 << 28)
+#       define R300_FP2_DVO_DUAL_CHANNEL_EN    (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID           0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID          0x03c4
+#define RADEON_FP_HORZ_STRETCH              0x028c
+#define RADEON_FP_HORZ2_STRETCH             0x038c
+#       define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+#       define RADEON_HORZ_STRETCH_RATIO_MAX  4096
+#       define RADEON_HORZ_PANEL_SIZE         (0x1ff   << 16)
+#       define RADEON_HORZ_PANEL_SHIFT        16
+#       define RADEON_HORZ_STRETCH_PIXREP     (0      << 25)
+#       define RADEON_HORZ_STRETCH_BLEND      (1      << 26)
+#       define RADEON_HORZ_STRETCH_ENABLE     (1      << 25)
+#       define RADEON_HORZ_AUTO_RATIO         (1      << 27)
+#       define RADEON_HORZ_FP_LOOP_STRETCH    (0x7    << 28)
+#       define RADEON_HORZ_AUTO_RATIO_INC     (1U     << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE          0x0278
+#define RADEON_FP_V_SYNC_STRT_WID           0x02c8
+#define RADEON_FP_VERT_STRETCH              0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID          0x03c8
+#define RADEON_FP_VERT2_STRETCH             0x0390
+#       define RADEON_VERT_PANEL_SIZE          (0xfff << 12)
+#       define RADEON_VERT_PANEL_SHIFT         12
+#       define RADEON_VERT_STRETCH_RATIO_MASK  0xfff
+#       define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+#       define RADEON_VERT_STRETCH_RATIO_MAX   4096
+#       define RADEON_VERT_STRETCH_ENABLE      (1     << 25)
+#       define RADEON_VERT_STRETCH_LINEREP     (0     << 26)
+#       define RADEON_VERT_STRETCH_BLEND       (1     << 26)
+#       define RADEON_VERT_AUTO_RATIO_EN       (1     << 27)
+#	define RADEON_VERT_AUTO_RATIO_INC      (1U    << 31)
+#       define RADEON_VERT_STRETCH_RESERVED    0x71000000
+#define RS400_FP_2ND_GEN_CNTL               0x0384
+#       define RS400_FP_2ND_ON              (1 << 0)
+#       define RS400_FP_2ND_BLANK_EN        (1 << 1)
+#       define RS400_TMDS_2ND_EN            (1 << 2)
+#       define RS400_PANEL_FORMAT_2ND       (1 << 3)
+#       define RS400_FP_2ND_EN_TMDS         (1 << 7)
+#       define RS400_FP_2ND_DETECT_SENSE    (1 << 8)
+#       define RS400_FP_2ND_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP_2ND_DETECT_EN       (1 << 12)
+#       define RS400_HPD_2ND_SEL            (1 << 13)
+#define RS400_FP2_2_GEN_CNTL                0x0388
+#       define RS400_FP2_2_BLANK_EN         (1 << 1)
+#       define RS400_FP2_2_ON               (1 << 2)
+#       define RS400_FP2_2_PANEL_FORMAT     (1 << 3)
+#       define RS400_FP2_2_DETECT_SENSE     (1 << 8)
+#       define RS400_FP2_2_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP2_2_DVO2_EN          (1 << 25)
+#define RS400_TMDS2_CNTL                    0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL        0x03a4
+#       define RS400_TMDS2_PLLEN            (1 << 0)
+#       define RS400_TMDS2_PLLRST           (1 << 1)
+
+#define RADEON_GEN_INT_CNTL                 0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_FP_DETECT_MASK		(1 << 4)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_FP2_DETECT_MASK		(1 << 10)
+#	define RADEON_GUI_IDLE_MASK		(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+#define RADEON_GEN_INT_STATUS               0x0044
+#	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_FP_DETECT_STAT		(1 << 4)
+#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_FP2_DETECT_STAT		(1 << 10)
+#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
+#	define RADEON_GUI_IDLE_STAT		(1 << 19)
+#	define RADEON_GUI_IDLE_STAT_ACK		(1 << 19)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
+#define RADEON_GENENB                       0x03c3 /* VGA */
+#define RADEON_GENFC_RD                     0x03ca /* VGA */
+#define RADEON_GENFC_WT                     0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD                     0x03cc /* VGA */
+#define RADEON_GENMO_WT                     0x03c2 /* VGA */
+#define RADEON_GENS0                        0x03c2 /* VGA */
+#define RADEON_GENS1                        0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID                   0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB                  0x006c
+#define RADEON_GPIO_CRT2_DDC                0x006c
+#define RADEON_GPIO_DVI_DDC                 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC                 0x0060 /* DDC1 */
+#       define RADEON_GPIO_A_0              (1 <<  0)
+#       define RADEON_GPIO_A_1              (1 <<  1)
+#       define RADEON_GPIO_Y_0              (1 <<  8)
+#       define RADEON_GPIO_Y_1              (1 <<  9)
+#       define RADEON_GPIO_Y_SHIFT_0        8
+#       define RADEON_GPIO_Y_SHIFT_1        9
+#       define RADEON_GPIO_EN_0             (1 << 16)
+#       define RADEON_GPIO_EN_1             (1 << 17)
+#       define RADEON_GPIO_MASK_0           (1 << 24) /*??*/
+#       define RADEON_GPIO_MASK_1           (1 << 25) /*??*/
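+
+/*
+ * Illustrative sketch, not part of the imported header: the DDC GPIO
+ * registers above drive an open-drain i2c bus.  Setting an EN bit (with
+ * the matching A bit clear) pulls the line low, clearing EN releases it,
+ * and the Y bits read the pins back; by convention line 1 is SCL and
+ * line 0 is SDA.  RREG32()/WREG32() are assumed accessors.
+ */
+#if 0
+static inline void radeon_ddc_set_scl(uint32_t ddc_reg, int high)
+{
+	uint32_t tmp = RREG32(ddc_reg) & ~(RADEON_GPIO_EN_1 | RADEON_GPIO_A_1);
+
+	if (!high)
+		tmp |= RADEON_GPIO_EN_1;	/* drive low */
+	WREG32(ddc_reg, tmp);
+}
+
+static inline int radeon_ddc_get_sda(uint32_t ddc_reg)
+{
+	return (RREG32(ddc_reg) & RADEON_GPIO_Y_0) != 0;
+}
+#endif
+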
+#define RADEON_GRPH8_DATA                   0x03cf /* VGA */
+#define RADEON_GRPH8_IDX                    0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0             0x15e0
+#define RADEON_GUI_SCRATCH_REG1             0x15e4
+#define RADEON_GUI_SCRATCH_REG2             0x15e8
+#define RADEON_GUI_SCRATCH_REG3             0x15ec
+#define RADEON_GUI_SCRATCH_REG4             0x15f0
+#define RADEON_GUI_SCRATCH_REG5             0x15f4
+
+#define RADEON_HEADER                       0x0f0e /* PCI */
+#define RADEON_HOST_DATA0                   0x17c0
+#define RADEON_HOST_DATA1                   0x17c4
+#define RADEON_HOST_DATA2                   0x17c8
+#define RADEON_HOST_DATA3                   0x17cc
+#define RADEON_HOST_DATA4                   0x17d0
+#define RADEON_HOST_DATA5                   0x17d4
+#define RADEON_HOST_DATA6                   0x17d8
+#define RADEON_HOST_DATA7                   0x17dc
+#define RADEON_HOST_DATA_LAST               0x17e0
+#define RADEON_HOST_PATH_CNTL               0x0130
+#	define RADEON_HP_LIN_RD_CACHE_DIS   (1 << 24)
+#	define RADEON_HDP_READ_BUFFER_INVALIDATE   (1 << 27)
+#       define RADEON_HDP_SOFT_RESET        (1 << 26)
+#       define RADEON_HDP_APER_CNTL         (1 << 23)
+#define RADEON_HTOTAL_CNTL                  0x0009 /* PLL */
+#       define RADEON_HTOT_CNTL_VGA_EN      (1 << 28)
+#define RADEON_HTOTAL2_CNTL                 0x002e /* PLL */
+
+       /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0		    0x0090
+#       define RADEON_I2C_DONE              (1 << 0)
+#       define RADEON_I2C_NACK              (1 << 1)
+#       define RADEON_I2C_HALT              (1 << 2)
+#       define RADEON_I2C_SOFT_RST          (1 << 5)
+#       define RADEON_I2C_DRIVE_EN          (1 << 6)
+#       define RADEON_I2C_DRIVE_SEL         (1 << 7)
+#       define RADEON_I2C_START             (1 << 8)
+#       define RADEON_I2C_STOP              (1 << 9)
+#       define RADEON_I2C_RECEIVE           (1 << 10)
+#       define RADEON_I2C_ABORT             (1 << 11)
+#       define RADEON_I2C_GO                (1 << 12)
+#       define RADEON_I2C_PRESCALE_SHIFT    16
+#define RADEON_I2C_CNTL_1                   0x0094
+#       define RADEON_I2C_DATA_COUNT_SHIFT  0
+#       define RADEON_I2C_ADDR_COUNT_SHIFT  4
+#       define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
+#       define RADEON_I2C_SEL               (1 << 16)
+#       define RADEON_I2C_EN                (1 << 17)
+#       define RADEON_I2C_TIME_LIMIT_SHIFT  24
+#define RADEON_I2C_DATA			    0x0098
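+
+/*
+ * Illustrative sketch, not part of the imported header: after loading the
+ * counts in I2C_CNTL_1 and the address/data bytes, a transfer on the
+ * multimedia i2c engine is kicked by setting RADEON_I2C_GO; one plausible
+ * completion poll then watches RADEON_I2C_DONE and RADEON_I2C_NACK.
+ * DRM_UDELAY() and RREG32() are assumed here.
+ */
+#if 0
+static inline int radeon_hw_i2c_wait(void)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++) {
+		uint32_t cntl = RREG32(RADEON_I2C_CNTL_0);
+
+		if (cntl & RADEON_I2C_DONE)
+			return (cntl & RADEON_I2C_NACK) ? -1 : 0;
+		DRM_UDELAY(10);
+	}
+	return -1;		/* timed out */
+}
+#endif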
+
+#define RADEON_DVI_I2C_CNTL_0		    0x02e0
+#       define R200_DVI_I2C_PIN_SEL(x)      ((x) << 3)
+#       define R200_SEL_DDC1                0 /* depends on asic */
+#       define R200_SEL_DDC2                1 /* depends on asic */
+#       define R200_SEL_DDC3                2 /* depends on asic */
+#	define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+#	define RADEON_SW_CAN_USE_DVI_I2C      (1 << 13)
+#	define RADEON_SW_DONE_USING_DVI_I2C   (1 << 14)
+#	define RADEON_HW_NEEDS_DVI_I2C        (1 << 14)
+#	define RADEON_ABORT_HW_DVI_I2C        (1 << 15)
+#	define RADEON_HW_USING_DVI_I2C        (1 << 15)
+#define RADEON_DVI_I2C_CNTL_1               0x02e4
+#define RADEON_DVI_I2C_DATA		    0x02e8
+
+#define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN                0x0f3d /* PCI */
+#define RADEON_IO_BASE                      0x0f14 /* PCI */
+
+#define RADEON_LATENCY                      0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC                0x1608
+#define RADEON_LEAD_BRES_LNTH               0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB           0x1624
+#define RADEON_LVDS_GEN_CNTL                0x02d0
+#       define RADEON_LVDS_ON               (1   <<  0)
+#       define RADEON_LVDS_DISPLAY_DIS      (1   <<  1)
+#       define RADEON_LVDS_PANEL_TYPE       (1   <<  2)
+#       define RADEON_LVDS_PANEL_FORMAT     (1   <<  3)
+#       define RADEON_LVDS_NO_FM            (0   <<  4)
+#       define RADEON_LVDS_2_GREY           (1   <<  4)
+#       define RADEON_LVDS_4_GREY           (2   <<  4)
+#       define RADEON_LVDS_RST_FM           (1   <<  6)
+#       define RADEON_LVDS_EN               (1   <<  7)
+#       define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+#       define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+#       define RADEON_LVDS_BL_MOD_EN        (1   << 16)
+#       define RADEON_LVDS_BL_CLK_SEL       (1   << 17)
+#       define RADEON_LVDS_DIGON            (1   << 18)
+#       define RADEON_LVDS_BLON             (1   << 19)
+#       define RADEON_LVDS_FP_POL_LOW       (1   << 20)
+#       define RADEON_LVDS_LP_POL_LOW       (1   << 21)
+#       define RADEON_LVDS_DTM_POL_LOW      (1   << 22)
+#       define RADEON_LVDS_SEL_CRTC2        (1   << 23)
+#       define RADEON_LVDS_FPDI_EN          (1   << 27)
+#       define RADEON_LVDS_HSYNC_DELAY_SHIFT        28
+#define RADEON_LVDS_PLL_CNTL                0x02d4
+#       define RADEON_HSYNC_DELAY_SHIFT     28
+#       define RADEON_HSYNC_DELAY_MASK      (0xf << 28)
+#       define RADEON_LVDS_PLL_EN           (1   << 16)
+#       define RADEON_LVDS_PLL_RESET        (1   << 17)
+#       define R300_LVDS_SRC_SEL_MASK       (3   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC1      (0   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC2      (1   << 18)
+#       define R300_LVDS_SRC_SEL_RMX        (2   << 18)
+#define RADEON_LVDS_SS_GEN_CNTL             0x02ec
+#       define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT     16
+#       define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT     20
+
+#define RADEON_MAX_LATENCY                  0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR            0x23c
+#define RADEON_DISPLAY2_BASE_ADDR           0x33c
+#define RADEON_OV0_BASE_ADDR                0x43c
+#define RADEON_NB_TOM                       0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER         0x180
+#       define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+#       define R300_MC_DISP0R_INIT_LAT_MASK  0xf
+#       define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+#       define R300_MC_DISP1R_INIT_LAT_MASK  0xf
+#define RADEON_MCLK_CNTL                    0x0012 /* PLL */
+#       define RADEON_MCLKA_SRC_SEL_MASK    0x7
+#       define RADEON_FORCEON_MCLKA         (1 << 16)
+#       define RADEON_FORCEON_MCLKB         (1 << 17)
+#       define RADEON_FORCEON_YCLKA         (1 << 18)
+#       define RADEON_FORCEON_YCLKB         (1 << 19)
+#       define RADEON_FORCEON_MC            (1 << 20)
+#       define RADEON_FORCEON_AIC           (1 << 21)
+#       define R300_DISABLE_MC_MCLKA        (1 << 21)
+#       define R300_DISABLE_MC_MCLKB        (1 << 21)
+#define RADEON_MCLK_MISC                    0x001f /* PLL */
+#       define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+#       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+#       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
+#       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
+
+#define RADEON_GPIOPAD_MASK                 0x0198
+#define RADEON_GPIOPAD_A		    0x019c
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_GPIOPAD_Y                    0x01a4
+#define RADEON_MDGPIO_MASK                  0x01a8
+#define RADEON_MDGPIO_A                     0x01ac
+#define RADEON_MDGPIO_EN                    0x01b0
+#define RADEON_MDGPIO_Y                     0x01b4
+
+#define RADEON_MEM_ADDR_CONFIG              0x0148
+#define RADEON_MEM_BASE                     0x0f10 /* PCI */
+#define RADEON_MEM_CNTL                     0x0140
+#       define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+#       define RADEON_MEM_USE_B_CH_ONLY     (1 <<  1)
+#       define RV100_HALF_MODE              (1 <<  3)
+#       define R300_MEM_NUM_CHANNELS_MASK   0x03
+#       define R300_MEM_USE_CD_CH_ONLY      (1 <<  2)
+#define RADEON_MEM_TIMING_CNTL              0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER           0x0154
+#define RADEON_MEM_INTF_CNTL                0x014c
+#define RADEON_MEM_SDRAM_MODE_REG           0x0158
+#       define RADEON_SDRAM_MODE_MASK       0xffff0000
+#       define RADEON_B3MEM_RESET_MASK      0x6fffffff
+#       define RADEON_MEM_CFG_TYPE_DDR      (1 << 30)
+#define RADEON_MEM_STR_CNTL                 0x0150
+#       define RADEON_MEM_PWRUP_COMPL_A     (1 <<  0)
+#       define RADEON_MEM_PWRUP_COMPL_B     (1 <<  1)
+#       define R300_MEM_PWRUP_COMPL_C       (1 <<  2)
+#       define R300_MEM_PWRUP_COMPL_D       (1 <<  3)
+#       define RADEON_MEM_PWRUP_COMPLETE    0x03
+#       define R300_MEM_PWRUP_COMPLETE      0x0f
+#define RADEON_MC_STATUS                    0x0150
+#       define RADEON_MC_IDLE               (1 << 2)
+#       define R300_MC_IDLE                 (1 << 4)
+#define RADEON_MEM_VGA_RP_SEL               0x003c
+#define RADEON_MEM_VGA_WP_SEL               0x0038
+#define RADEON_MIN_GRANT                    0x0f3e /* PCI */
+#define RADEON_MM_DATA                      0x0004
+#define RADEON_MM_INDEX                     0x0000
+#	define RADEON_MM_APER		(1U << 31)
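+
+/*
+ * Illustrative sketch, not part of the imported header: registers beyond
+ * the directly mapped window can be reached through the MM_INDEX/MM_DATA
+ * pair; RADEON_MM_APER in the index selects the framebuffer aperture
+ * instead of register space.  RREG32()/WREG32() are assumed accessors.
+ */
+#if 0
+static inline uint32_t radeon_mm_read(uint32_t reg)
+{
+	WREG32(RADEON_MM_INDEX, reg);
+	return RREG32(RADEON_MM_DATA);
+}
+#endif
+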
+#define RADEON_MPLL_CNTL                    0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG                0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG                0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1                 0x01c0
+#       define RADEON_SCK_PRESCALE_SHIFT    24
+#       define RADEON_SCK_PRESCALE_MASK     (0xff << 24)
+#define R300_MC_IND_INDEX                   0x01f8
+#       define R300_MC_IND_ADDR_MASK        0x3f
+#       define R300_MC_IND_WR_EN            (1 << 8)
+#define R300_MC_IND_DATA                    0x01fc
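+
+/*
+ * Illustrative sketch, not part of the imported header: r300 memory
+ * controller registers are indirect.  A write presents the address with
+ * R300_MC_IND_WR_EN set, then the data, then clears the index again.
+ * WREG32() is an assumed accessor.
+ */
+#if 0
+static inline void r300_mc_write(uint32_t addr, uint32_t val)
+{
+	WREG32(R300_MC_IND_INDEX, (addr & R300_MC_IND_ADDR_MASK) | R300_MC_IND_WR_EN);
+	WREG32(R300_MC_IND_DATA, val);
+	WREG32(R300_MC_IND_INDEX, 0);
+}
+#endif
+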
+#define R300_MC_READ_CNTL_AB                0x017c
+#       define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind	    0x24
+#       define R300_MEM_RBS_POSITION_C_MASK 0x03
+
+#define RADEON_N_VIF_COUNT                  0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL           0x0470
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM        0x00000007
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD   0x00000008
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD        0x00000010
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE     0x00000040
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT     0x00000300
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN  0x00010000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN     0x00040000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN      0x00080000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE    0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL              0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN      0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ           0x0408
+#       define  RADEON_EXCL_HORZ_START_MASK        0x000000ff
+#       define  RADEON_EXCL_HORZ_END_MASK          0x0000ff00
+#       define  RADEON_EXCL_HORZ_BACK_PORCH_MASK   0x00ff0000
+#       define  RADEON_EXCL_HORZ_EXCLUSIVE_EN      0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT           0x040C
+#       define  RADEON_EXCL_VERT_START_MASK        0x000003ff
+#       define  RADEON_EXCL_VERT_END_MASK          0x03ff0000
+#define RADEON_OV0_FILTER_CNTL              0x04A0
+#       define RADEON_FILTER_PROGRAMMABLE_COEF            0x0
+#       define RADEON_FILTER_HC_COEF_HORZ_Y               0x1
+#       define RADEON_FILTER_HC_COEF_HORZ_UV              0x2
+#       define RADEON_FILTER_HC_COEF_VERT_Y               0x4
+#       define RADEON_FILTER_HC_COEF_VERT_UV              0x8
+#       define RADEON_FILTER_HARDCODED_COEF               0xf
+#       define RADEON_FILTER_COEF_MASK                    0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0          0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1          0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2          0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3          0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4          0x04C0
+#define RADEON_OV0_FLAG_CNTL                0x04DC
+#define RADEON_OV0_GAMMA_000_00F            0x0d40
+#define RADEON_OV0_GAMMA_010_01F            0x0d44
+#define RADEON_OV0_GAMMA_020_03F            0x0d48
+#define RADEON_OV0_GAMMA_040_07F            0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF            0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF            0x0e04
+#define RADEON_OV0_GAMMA_100_13F            0x0e08
+#define RADEON_OV0_GAMMA_140_17F            0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF            0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF            0x0e14
+#define RADEON_OV0_GAMMA_200_23F            0x0e18
+#define RADEON_OV0_GAMMA_240_27F            0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF            0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF            0x0e24
+#define RADEON_OV0_GAMMA_300_33F            0x0e28
+#define RADEON_OV0_GAMMA_340_37F            0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF            0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF            0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW     0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH    0x04F0
+#define RADEON_OV0_H_INC                    0x0480
+#define RADEON_OV0_KEY_CNTL                 0x04F4
+#       define  RADEON_VIDEO_KEY_FN_MASK    0x00000003L
+#       define  RADEON_VIDEO_KEY_FN_FALSE   0x00000000L
+#       define  RADEON_VIDEO_KEY_FN_TRUE    0x00000001L
+#       define  RADEON_VIDEO_KEY_FN_EQ      0x00000002L
+#       define  RADEON_VIDEO_KEY_FN_NE      0x00000003L
+#       define  RADEON_GRAPHIC_KEY_FN_MASK  0x00000030L
+#       define  RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+#       define  RADEON_GRAPHIC_KEY_FN_TRUE  0x00000010L
+#       define  RADEON_GRAPHIC_KEY_FN_EQ    0x00000020L
+#       define  RADEON_GRAPHIC_KEY_FN_NE    0x00000030L
+#       define  RADEON_CMP_MIX_MASK         0x00000100L
+#       define  RADEON_CMP_MIX_OR           0x00000000L
+#       define  RADEON_CMP_MIX_AND          0x00000100L
+#define RADEON_OV0_LIN_TRANS_A              0x0d20
+#define RADEON_OV0_LIN_TRANS_B              0x0d24
+#define RADEON_OV0_LIN_TRANS_C              0x0d28
+#define RADEON_OV0_LIN_TRANS_D              0x0d2c
+#define RADEON_OV0_LIN_TRANS_E              0x0d30
+#define RADEON_OV0_LIN_TRANS_F              0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP    0x0430
+#       define  RADEON_P1_BLNK_LN_AT_TOP_M1_MASK   0x00000fffL
+#       define  RADEON_P1_ACTIVE_LINES_M1          0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT          0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT          0x0428
+#       define  RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+#       define  RADEON_OV0_P1_V_ACCUM_INIT_MASK    0x01ff8000L
+#define RADEON_OV0_P1_X_START_END           0x0494
+#define RADEON_OV0_P2_X_START_END           0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP   0x0434
+#       define  RADEON_P23_BLNK_LN_AT_TOP_M1_MASK  0x000007ffL
+#       define  RADEON_P23_ACTIVE_LINES_M1         0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT         0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT         0x042C
+#define RADEON_OV0_P3_X_START_END           0x049C
+#define RADEON_OV0_REG_LOAD_CNTL            0x0410
+#       define  RADEON_REG_LD_CTL_LOCK                 0x00000001L
+#       define  RADEON_REG_LD_CTL_VBLANK_DURING_LOCK   0x00000002L
+#       define  RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+#       define  RADEON_REG_LD_CTL_LOCK_READBACK        0x00000008L
+#       define  RADEON_REG_LD_CTL_FLIP_READBACK        0x00000010L
+#define RADEON_OV0_SCALE_CNTL               0x0420
+#       define  RADEON_SCALER_HORZ_PICK_NEAREST    0x00000004L
+#       define  RADEON_SCALER_VERT_PICK_NEAREST    0x00000008L
+#       define  RADEON_SCALER_SIGNED_UV            0x00000010L
+#       define  RADEON_SCALER_GAMMA_SEL_MASK       0x00000060L
+#       define  RADEON_SCALER_GAMMA_SEL_BRIGHT     0x00000000L
+#       define  RADEON_SCALER_GAMMA_SEL_G22        0x00000020L
+#       define  RADEON_SCALER_GAMMA_SEL_G18        0x00000040L
+#       define  RADEON_SCALER_GAMMA_SEL_G14        0x00000060L
+#       define  RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+#       define  RADEON_SCALER_SURFAC_FORMAT        0x00000f00L
+#       define  RADEON_SCALER_SOURCE_15BPP         0x00000300L
+#       define  RADEON_SCALER_SOURCE_16BPP         0x00000400L
+#       define  RADEON_SCALER_SOURCE_32BPP         0x00000600L
+#       define  RADEON_SCALER_SOURCE_YUV9          0x00000900L
+#       define  RADEON_SCALER_SOURCE_YUV12         0x00000A00L
+#       define  RADEON_SCALER_SOURCE_VYUY422       0x00000B00L
+#       define  RADEON_SCALER_SOURCE_YVYU422       0x00000C00L
+#       define  RADEON_SCALER_ADAPTIVE_DEINT       0x00001000L
+#       define  RADEON_SCALER_TEMPORAL_DEINT       0x00002000L
+#       define  RADEON_SCALER_CRTC_SEL             0x00004000L
+#       define  RADEON_SCALER_SMART_SWITCH         0x00008000L
+#       define  RADEON_SCALER_BURST_PER_PLANE      0x007F0000L
+#       define  RADEON_SCALER_DOUBLE_BUFFER        0x01000000L
+#       define  RADEON_SCALER_DIS_LIMIT            0x08000000L
+#       define  RADEON_SCALER_LIN_TRANS_BYPASS     0x10000000L
+#       define  RADEON_SCALER_INT_EMU              0x20000000L
+#       define  RADEON_SCALER_ENABLE               0x40000000L
+#       define  RADEON_SCALER_SOFT_RESET           0x80000000L
+#define RADEON_OV0_STEP_BY                  0x0484
+#define RADEON_OV0_TEST                     0x04F8
+#define RADEON_OV0_V_INC                    0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE     0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE     0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS       0x0440
+#       define  RADEON_VIF_BUF0_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF0_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF0_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS       0x0444
+#       define  RADEON_VIF_BUF1_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF1_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF1_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS       0x0448
+#       define  RADEON_VIF_BUF2_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF2_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF2_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS       0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS       0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS       0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH       0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW        0x04E4
+#define RADEON_OV0_Y_X_START                0x0400
+#define RADEON_OV0_Y_X_END                  0x0404
+#define RADEON_OV1_Y_X_START                0x0600
+#define RADEON_OV1_Y_X_END                  0x0604
+#define RADEON_OVR_CLR                      0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT           0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+#define RADEON_OVR2_CLR                     0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
+
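+/*
+ * Usage sketch (not part of the original header): overlay register updates
+ * are typically bracketed by the OV0_REG_LOAD_CNTL lock so that all scaler
+ * state is applied atomically on the next flip.  rd32/wr32 are assumed MMIO
+ * accessors, not a real API of this driver.
+ */
+#if 0	/* illustrative only */
+static inline void
+radeon_ov0_enable_yuv12(uint32_t (*rd32)(uint32_t reg),
+    void (*wr32)(uint32_t reg, uint32_t val))
+{
+	/* Hold off hardware consumption of overlay registers. */
+	wr32(RADEON_OV0_REG_LOAD_CNTL, RADEON_REG_LD_CTL_LOCK);
+	while (!(rd32(RADEON_OV0_REG_LOAD_CNTL) &
+	    RADEON_REG_LD_CTL_LOCK_READBACK))
+		;	/* wait until the lock is acknowledged */
+	/* Double-buffered planar YUV12 source, scaler enabled. */
+	wr32(RADEON_OV0_SCALE_CNTL, RADEON_SCALER_ENABLE |
+	    RADEON_SCALER_DOUBLE_BUFFER | RADEON_SCALER_SOURCE_YUV12);
+	/* Release the lock; the new state takes effect. */
+	wr32(RADEON_OV0_REG_LOAD_CNTL, 0);
+}
+#endif
+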
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET           0x0920
+#define RADEON_CAP0_BUF1_OFFSET           0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET      0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET      0x092C
+
+#define RADEON_CAP0_BUF_PITCH             0x0930
+#define RADEON_CAP0_V_WINDOW              0x0934
+#define RADEON_CAP0_H_WINDOW              0x0938
+#define RADEON_CAP0_VBI0_OFFSET           0x093C
+#define RADEON_CAP0_VBI1_OFFSET           0x0940
+#define RADEON_CAP0_VBI_V_WINDOW          0x0944
+#define RADEON_CAP0_VBI_H_WINDOW          0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL        0x094C
+#define RADEON_CAP0_TRIG_CNTL             0x0950
+#define RADEON_CAP0_DEBUG                 0x0954
+#define RADEON_CAP0_CONFIG                0x0958
+#       define RADEON_CAP0_CONFIG_CONTINUOS          0x00000001
+#       define RADEON_CAP0_CONFIG_START_FIELD_EVEN   0x00000002
+#       define RADEON_CAP0_CONFIG_START_BUF_GET      0x00000004
+#       define RADEON_CAP0_CONFIG_START_BUF_SET      0x00000008
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_ALT       0x00000010
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME     0x00000020
+#       define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+#       define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE    0x00000080
+#       define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE    0x00000100
+#       define RADEON_CAP0_CONFIG_MIRROR_EN          0x00000200
+#       define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN  0x00000400
+#       define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV    0x00000800
+#       define RADEON_CAP0_CONFIG_ANC_DECODE_EN      0x00001000
+#       define RADEON_CAP0_CONFIG_VBI_EN             0x00002000
+#       define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN  0x00004000
+#       define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+#       define RADEON_CAP0_CONFIG_FAKE_FIELD_EN      0x00010000
+#       define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE  0x00020000
+#       define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2      0x00080000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4      0x00100000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_2      0x00200000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_4      0x00400000
+#       define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE   0x00000000
+#       define RADEON_CAP0_CONFIG_FORMAT_CCIR656     0x00800000
+#       define RADEON_CAP0_CONFIG_FORMAT_ZV          0x01000000
+#       define RADEON_CAP0_CONFIG_FORMAT_VIP         0x01800000
+#       define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT   0x02000000
+#       define RADEON_CAP0_CONFIG_HORZ_DECIMATOR     0x04000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422   0x00000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422   0x20000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_2       0x40000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_4       0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET        0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET       0x0960
+#define RADEON_CAP0_ANC_H_WINDOW          0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST       0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET    0x096C
+#define RADEON_CAP0_BUF_STATUS            0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO       0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS                 0x097C */
+#define RADEON_CAP0_VBI2_OFFSET           0x0980
+#define RADEON_CAP0_VBI3_OFFSET           0x0984
+#define RADEON_CAP0_ANC2_OFFSET           0x0988
+#define RADEON_CAP0_ANC3_OFFSET           0x098C
+#define RADEON_VID_BUFFER_CONTROL         0x0900
+
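+/*
+ * Usage sketch (not part of the original header): the CAP0_CONFIG bits above
+ * are simply ORed together, e.g. continuous CCIR656 capture into a
+ * double-buffered target.  wr32 is an assumed MMIO accessor.
+ */
+#if 0	/* illustrative only */
+static inline void
+radeon_cap0_start_ccir656(void (*wr32)(uint32_t reg, uint32_t val))
+{
+	wr32(RADEON_CAP0_CONFIG,
+	    RADEON_CAP0_CONFIG_CONTINUOS |	/* (sic) continuous capture */
+	    RADEON_CAP0_CONFIG_FORMAT_CCIR656 |
+	    RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE);
+}
+#endif
+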
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET           0x0990
+#define RADEON_CAP1_BUF1_OFFSET           0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET      0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET      0x099C
+
+#define RADEON_CAP1_BUF_PITCH             0x09A0
+#define RADEON_CAP1_V_WINDOW              0x09A4
+#define RADEON_CAP1_H_WINDOW              0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET        0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET       0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW          0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW          0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL        0x09BC
+#define RADEON_CAP1_TRIG_CNTL             0x09C0
+#define RADEON_CAP1_DEBUG                 0x09C4
+#define RADEON_CAP1_CONFIG                0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET        0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET       0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW          0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST       0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET    0x09DC
+#define RADEON_CAP1_BUF_STATUS            0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO          0x09E8
+#define RADEON_CAP1_XSHARPNESS            0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS                  0x1F80
+#define RADEON_IDCT_LEVELS                0x1F84
+#define RADEON_IDCT_CONTROL               0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL          0x1F88
+#define RADEON_IDCT_AUTH                  0x1F8C
+
+#define RADEON_P2PLL_CNTL                   0x002a /* P2PLL */
+#       define RADEON_P2PLL_RESET                (1 <<  0)
+#       define RADEON_P2PLL_SLEEP                (1 <<  1)
+#       define RADEON_P2PLL_PVG_MASK             (7 << 11)
+#       define RADEON_P2PLL_PVG_SHIFT            11
+#       define RADEON_P2PLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_P2PLL_DIV_0                  0x002c
+#       define RADEON_P2PLL_FB0_DIV_MASK    0x07ff
+#       define RADEON_P2PLL_POST0_DIV_MASK  0x00070000
+#define RADEON_P2PLL_REF_DIV                0x002B /* PLL */
+#       define RADEON_P2PLL_REF_DIV_MASK    0x03ff
+#       define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+#       define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#       define R300_PPLL_REF_DIV_ACC_MASK   (0x3ff << 18)
+#       define R300_PPLL_REF_DIV_ACC_SHIFT  18
+#define RADEON_PALETTE_DATA                 0x00b4
+#define RADEON_PALETTE_30_DATA              0x00b8
+#define RADEON_PALETTE_INDEX                0x00b0
+#define RADEON_PCI_GART_PAGE                0x017c
+#define RADEON_PIXCLKS_CNTL                 0x002d
+#       define RADEON_PIX2CLK_SRC_SEL_MASK     0x03
+#       define RADEON_PIX2CLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_PIX2CLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+#       define RADEON_PIX2CLK_ALWAYS_ONb       (1<<6)
+#       define RADEON_PIX2CLK_DAC_ALWAYS_ONb   (1<<7)
+#       define RADEON_PIXCLK_TV_SRC_SEL        (1 << 8)
+#       define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+#       define R300_DVOCLK_ALWAYS_ONb          (1 << 10)
+#       define RADEON_PIXCLK_BLEND_ALWAYS_ONb  (1 << 11)
+#       define RADEON_PIXCLK_GV_ALWAYS_ONb     (1 << 12)
+#       define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+#       define R300_PIXCLK_DVO_ALWAYS_ONb      (1 << 13)
+#       define RADEON_PIXCLK_LVDS_ALWAYS_ONb   (1 << 14)
+#       define RADEON_PIXCLK_TMDS_ALWAYS_ONb   (1 << 15)
+#       define R300_PIXCLK_TRANS_ALWAYS_ONb    (1 << 16)
+#       define R300_PIXCLK_TVO_ALWAYS_ONb      (1 << 17)
+#       define R300_P2G2CLK_ALWAYS_ONb         (1 << 18)
+#       define R300_P2G2CLK_DAC_ALWAYS_ONb     (1 << 19)
+#       define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C              0x1d44
+#define RADEON_PLL_TEST_CNTL                0x0013 /* PLL */
+#       define RADEON_PLL_MASK_READ_B          (1 << 9)
+#define RADEON_PMI_CAP_ID                   0x0f5c /* PCI */
+#define RADEON_PMI_DATA                     0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR              0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG                  0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG                0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER                 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL                    0x0002 /* PLL */
+#       define RADEON_PPLL_RESET                (1 <<  0)
+#       define RADEON_PPLL_SLEEP                (1 <<  1)
+#       define RADEON_PPLL_PVG_MASK             (7 << 11)
+#       define RADEON_PPLL_PVG_SHIFT            11
+#       define RADEON_PPLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_PPLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_PPLL_DIV_0                   0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1                   0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2                   0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3                   0x0007 /* PLL */
+#       define RADEON_PPLL_FB3_DIV_MASK     0x07ff
+#       define RADEON_PPLL_POST3_DIV_MASK   0x00070000
+#define RADEON_PPLL_REF_DIV                 0x0003 /* PLL */
+#       define RADEON_PPLL_REF_DIV_MASK     0x03ff
+#       define RADEON_PPLL_ATOMIC_UPDATE_R  (1 << 15) /* same as _W */
+#       define RADEON_PPLL_ATOMIC_UPDATE_W  (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS        0x0f60 /* PCI */
+
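+/*
+ * Usage sketch (not part of the original header): divider writes to the PPLL
+ * are latched with the atomic-update handshake; _R reads back the pending
+ * state and _W arms the update (same bit, see above).  rd_pll/wr_pll stand
+ * in for the driver's indirect PLL accessors (assumptions of this sketch).
+ */
+#if 0	/* illustrative only */
+static inline void
+radeon_ppll_commit(uint32_t (*rd_pll)(uint32_t reg),
+    void (*wr_pll)(uint32_t reg, uint32_t val))
+{
+	/* Wait until any previous atomic update has been consumed. */
+	while (rd_pll(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R)
+		;
+	/* Arm the update; dividers are latched at the next vsync. */
+	wr_pll(RADEON_PPLL_REF_DIV,
+	    rd_pll(RADEON_PPLL_REF_DIV) | RADEON_PPLL_ATOMIC_UPDATE_W);
+}
+#endif
+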
+#define RADEON_RBBM_GUICNTL                 0x172c
+#       define RADEON_HOST_DATA_SWAP_NONE   (0 << 0)
+#       define RADEON_HOST_DATA_SWAP_16BIT  (1 << 0)
+#       define RADEON_HOST_DATA_SWAP_32BIT  (2 << 0)
+#       define RADEON_HOST_DATA_SWAP_HDW    (3 << 0)
+#define RADEON_RBBM_SOFT_RESET              0x00f0
+#       define RADEON_SOFT_RESET_CP         (1 <<  0)
+#       define RADEON_SOFT_RESET_HI         (1 <<  1)
+#       define RADEON_SOFT_RESET_SE         (1 <<  2)
+#       define RADEON_SOFT_RESET_RE         (1 <<  3)
+#       define RADEON_SOFT_RESET_PP         (1 <<  4)
+#       define RADEON_SOFT_RESET_E2         (1 <<  5)
+#       define RADEON_SOFT_RESET_RB         (1 <<  6)
+#       define RADEON_SOFT_RESET_HDP        (1 <<  7)
+#define RADEON_RBBM_STATUS                  0x0e40
+#       define RADEON_RBBM_FIFOCNT_MASK     0x007f
+#       define RADEON_RBBM_ACTIVE           (1U << 31)
+#define RADEON_RB2D_DSTCACHE_CTLSTAT        0x342c
+#       define RADEON_RB2D_DC_FLUSH         (3 << 0)
+#       define RADEON_RB2D_DC_FREE          (3 << 2)
+#       define RADEON_RB2D_DC_FLUSH_ALL     0xf
+#       define RADEON_RB2D_DC_BUSY          (1U << 31)
+#define RADEON_RB2D_DSTCACHE_MODE           0x3428
+#define RADEON_DSTCACHE_CTLSTAT             0x1714
+
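+/*
+ * Usage sketch (not part of the original header): engine-idle polling keys
+ * off RADEON_RBBM_ACTIVE; on timeout a driver would typically soft-reset
+ * the blocks via RADEON_RBBM_SOFT_RESET.  rd32 is an assumed MMIO accessor.
+ */
+#if 0	/* illustrative only */
+static inline int
+radeon_wait_for_idle(uint32_t (*rd32)(uint32_t reg), int tries)
+{
+	int i;
+
+	for (i = 0; i < tries; i++) {
+		if (!(rd32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
+			return (0);	/* all engines idle */
+	}
+	return (-1);	/* still busy; consider RBBM_SOFT_RESET */
+}
+#endif
+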
+#define RADEON_RB3D_ZCACHE_MODE             0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT          0x3254
+#       define RADEON_RB3D_ZC_FLUSH_ALL     0x5
+#define RADEON_RB3D_DSTCACHE_MODE           0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE            (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE        (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE        (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE           (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128   (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128   (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH      (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH      (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE         (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE         (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW               (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL         (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ         (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT            0x325C
+# define RADEON_RB3D_DC_FLUSH                   (3 << 0)
+# define RADEON_RB3D_DC_FREE                    (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL               0xf
+# define RADEON_RB3D_DC_BUSY                    (1U << 31)
+
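+/*
+ * Usage sketch (not part of the original header): flushing the 3D
+ * destination cache is a write of DC_FLUSH_ALL followed by polling the
+ * busy bit, mirroring the 2D variant above.  rd32/wr32 are assumed
+ * MMIO accessors.
+ */
+#if 0	/* illustrative only */
+static inline void
+radeon_flush_3d_dstcache(uint32_t (*rd32)(uint32_t reg),
+    void (*wr32)(uint32_t reg, uint32_t val))
+{
+	wr32(RADEON_RB3D_DSTCACHE_CTLSTAT, RADEON_RB3D_DC_FLUSH_ALL);
+	while (rd32(RADEON_RB3D_DSTCACHE_CTLSTAT) & RADEON_RB3D_DC_BUSY)
+		;	/* spin until the flush completes */
+}
+#endif
+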
+#define RADEON_REG_BASE                     0x0f18 /* PCI */
+#define RADEON_REGPROG_INF                  0x0f09 /* PCI */
+#define RADEON_REVISION_ID                  0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM                    0x164c
+#define RADEON_SC_BOTTOM_RIGHT              0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C            0x1c8c
+#define RADEON_SC_LEFT                      0x1640
+#define RADEON_SC_RIGHT                     0x1644
+#define RADEON_SC_TOP                       0x1648
+#define RADEON_SC_TOP_LEFT                  0x16ec
+#define RADEON_SC_TOP_LEFT_C                0x1c88
+#       define RADEON_SC_SIGN_MASK_LO       0x8000
+#       define RADEON_SC_SIGN_MASK_HI       0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV            0x000a /* PLL */
+#       define RADEON_M_SPLL_REF_DIV_SHIFT  0
+#       define RADEON_M_SPLL_REF_DIV_MASK   0xff
+#       define RADEON_MPLL_FB_DIV_SHIFT     8
+#       define RADEON_MPLL_FB_DIV_MASK      0xff
+#       define RADEON_SPLL_FB_DIV_SHIFT     16
+#       define RADEON_SPLL_FB_DIV_MASK      0xff
+#define RADEON_SPLL_CNTL                    0x000c /* PLL */
+#       define RADEON_SPLL_SLEEP            (1 << 0)
+#       define RADEON_SPLL_RESET            (1 << 1)
+#       define RADEON_SPLL_PCP_MASK         0x7
+#       define RADEON_SPLL_PCP_SHIFT        8
+#       define RADEON_SPLL_PVG_MASK         0x7
+#       define RADEON_SPLL_PVG_SHIFT        11
+#       define RADEON_SPLL_PDC_MASK         0x3
+#       define RADEON_SPLL_PDC_SHIFT        14
+#define RADEON_SCLK_CNTL                    0x000d /* PLL */
+#       define RADEON_SCLK_SRC_SEL_MASK     0x0007
+#       define RADEON_DYN_STOP_LAT_MASK     0x00007ff8
+#       define RADEON_CP_MAX_DYN_STOP_LAT   0x0008
+#       define RADEON_SCLK_FORCEON_MASK     0xffff8000
+#       define RADEON_SCLK_FORCE_DISP2      (1<<15)
+#       define RADEON_SCLK_FORCE_CP         (1<<16)
+#       define RADEON_SCLK_FORCE_HDP        (1<<17)
+#       define RADEON_SCLK_FORCE_DISP1      (1<<18)
+#       define RADEON_SCLK_FORCE_TOP        (1<<19)
+#       define RADEON_SCLK_FORCE_E2         (1<<20)
+#       define RADEON_SCLK_FORCE_SE         (1<<21)
+#       define RADEON_SCLK_FORCE_IDCT       (1<<22)
+#       define RADEON_SCLK_FORCE_VIP        (1<<23)
+#       define RADEON_SCLK_FORCE_RE         (1<<24)
+#       define RADEON_SCLK_FORCE_PB         (1<<25)
+#       define RADEON_SCLK_FORCE_TAM        (1<<26)
+#       define RADEON_SCLK_FORCE_TDM        (1<<27)
+#       define RADEON_SCLK_FORCE_RB         (1<<28)
+#       define RADEON_SCLK_FORCE_TV_SCLK    (1<<29)
+#       define RADEON_SCLK_FORCE_SUBPIC     (1<<30)
+#       define RADEON_SCLK_FORCE_OV0        (1U<<31)
+#       define R300_SCLK_FORCE_VAP          (1<<21)
+#       define R300_SCLK_FORCE_SR           (1<<25)
+#       define R300_SCLK_FORCE_PX           (1<<26)
+#       define R300_SCLK_FORCE_TX           (1<<27)
+#       define R300_SCLK_FORCE_US           (1<<28)
+#       define R300_SCLK_FORCE_SU           (1<<30)
+#define R300_SCLK_CNTL2                     0x1e   /* PLL */
+#       define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+#       define R300_SCLK_GA_MAX_DYN_STOP_LAT  (1<<11)
+#       define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+#       define R300_SCLK_FORCE_TCL          (1<<13)
+#       define R300_SCLK_FORCE_CBA          (1<<14)
+#       define R300_SCLK_FORCE_GA           (1<<15)
+#define RADEON_SCLK_MORE_CNTL               0x0035 /* PLL */
+#       define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+#       define RADEON_SCLK_MORE_FORCEON     0x0700
+#define RADEON_SDRAM_MODE_REG               0x0158
+#define RADEON_SEQ8_DATA                    0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX                     0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT             0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS           0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT           0x024c
+#define RADEON_SRC_OFFSET                   0x15ac
+#define RADEON_SRC_PITCH                    0x15b0
+#define RADEON_SRC_PITCH_OFFSET             0x1428
+#define RADEON_SRC_SC_BOTTOM                0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT          0x16f4
+#define RADEON_SRC_SC_RIGHT                 0x1654
+#define RADEON_SRC_X                        0x1414
+#define RADEON_SRC_X_Y                      0x1590
+#define RADEON_SRC_Y                        0x1418
+#define RADEON_SRC_Y_X                      0x1434
+#define RADEON_STATUS                       0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL                  0x0540 /* ? */
+#define RADEON_SUB_CLASS                    0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL                 0x0b00
+#       define RADEON_SURF_TRANSLATION_DIS  (1 << 8)
+#       define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+#       define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+#       define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+#       define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO                0x0b0c
+#       define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+#       define RADEON_SURF_TILE_COLOR_BOTH  (1 << 16)
+#       define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+#       define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+#       define R200_SURF_TILE_NONE          (0 << 16)
+#       define R200_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R200_SURF_TILE_COLOR_MICRO   (2 << 16)
+#       define R200_SURF_TILE_COLOR_BOTH    (3 << 16)
+#       define R200_SURF_TILE_DEPTH_32BPP   (4 << 16)
+#       define R200_SURF_TILE_DEPTH_16BPP   (5 << 16)
+#       define R300_SURF_TILE_NONE          (0 << 16)
+#       define R300_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R300_SURF_TILE_DEPTH_32BPP   (2 << 16)
+#       define RADEON_SURF_AP0_SWP_16BPP    (1 << 20)
+#       define RADEON_SURF_AP0_SWP_32BPP    (1 << 21)
+#       define RADEON_SURF_AP1_SWP_16BPP    (1 << 22)
+#       define RADEON_SURF_AP1_SWP_32BPP    (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND         0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND         0x0b08
+#define RADEON_SURFACE1_INFO                0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND         0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND         0x0b18
+#define RADEON_SURFACE2_INFO                0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND         0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND         0x0b28
+#define RADEON_SURFACE3_INFO                0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND         0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND         0x0b38
+#define RADEON_SURFACE4_INFO                0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND         0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND         0x0b48
+#define RADEON_SURFACE5_INFO                0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND         0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND         0x0b58
+#define RADEON_SURFACE6_INFO                0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND         0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND         0x0b68
+#define RADEON_SURFACE7_INFO                0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND         0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND         0x0b78
+#define RADEON_SW_SEMAPHORE                 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL              0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX               0x0124
+#define RADEON_TEST_DEBUG_OUT               0x012c
+#define RADEON_TMDS_PLL_CNTL                0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL        0x02a4
+#       define RADEON_TMDS_TRANSMITTER_PLLEN  1
+#       define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC               0x1614
+#define RADEON_TRAIL_BRES_ERR               0x160c
+#define RADEON_TRAIL_BRES_INC               0x1610
+#define RADEON_TRAIL_X                      0x1618
+#define RADEON_TRAIL_X_SUB                  0x1620
+
+#define RADEON_VCLK_ECP_CNTL                0x0008 /* PLL */
+#       define RADEON_VCLK_SRC_SEL_MASK     0x03
+#       define RADEON_VCLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_VCLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_VCLK_SRC_SEL_PPLLCLK  0x03
+#       define RADEON_PIXCLK_ALWAYS_ONb     (1<<6)
+#       define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+#       define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID                    0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG               0x02e8
+#define RADEON_VGA_DDA_ON_OFF               0x02ec
+#define RADEON_VID_BUFFER_CONTROL           0x0900
+#define RADEON_VIDEOMUX_CNTL                0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA                0x0c00
+#define RADEON_VIPH_CH1_DATA                0x0c04
+#define RADEON_VIPH_CH2_DATA                0x0c08
+#define RADEON_VIPH_CH3_DATA                0x0c0c
+#define RADEON_VIPH_CH0_ADDR                0x0c10
+#define RADEON_VIPH_CH1_ADDR                0x0c14
+#define RADEON_VIPH_CH2_ADDR                0x0c18
+#define RADEON_VIPH_CH3_ADDR                0x0c1c
+#define RADEON_VIPH_CH0_SBCNT               0x0c20
+#define RADEON_VIPH_CH1_SBCNT               0x0c24
+#define RADEON_VIPH_CH2_SBCNT               0x0c28
+#define RADEON_VIPH_CH3_SBCNT               0x0c2c
+#define RADEON_VIPH_CH0_ABCNT               0x0c30
+#define RADEON_VIPH_CH1_ABCNT               0x0c34
+#define RADEON_VIPH_CH2_ABCNT               0x0c38
+#define RADEON_VIPH_CH3_ABCNT               0x0c3c
+#define RADEON_VIPH_CONTROL                 0x0c40
+#       define RADEON_VIP_BUSY 0
+#       define RADEON_VIP_IDLE 1
+#       define RADEON_VIP_RESET 2
+#       define RADEON_VIPH_EN               (1 << 21)
+#define RADEON_VIPH_DV_LAT                  0x0c44
+#define RADEON_VIPH_BM_CHUNK                0x0c48
+#define RADEON_VIPH_DV_INT                  0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT            0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK   0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA                0x0084
+#define RADEON_VIPH_REG_ADDR                0x0080
+
+
+#define RADEON_WAIT_UNTIL                   0x1720
+#       define RADEON_WAIT_CRTC_PFLIP       (1 << 0)
+#       define RADEON_WAIT_RE_CRTC_VLINE    (1 << 1)
+#       define RADEON_WAIT_FE_CRTC_VLINE    (1 << 2)
+#       define RADEON_WAIT_CRTC_VLINE       (1 << 3)
+#       define RADEON_WAIT_DMA_VID_IDLE     (1 << 8)
+#       define RADEON_WAIT_DMA_GUI_IDLE     (1 << 9)
+#       define RADEON_WAIT_CMDFIFO          (1 << 10) /* wait for CMDFIFO_ENTRIES */
+#       define RADEON_WAIT_OV0_FLIP         (1 << 11)
+#       define RADEON_WAIT_AGP_FLUSH        (1 << 13)
+#       define RADEON_WAIT_2D_IDLE          (1 << 14)
+#       define RADEON_WAIT_3D_IDLE          (1 << 15)
+#       define RADEON_WAIT_2D_IDLECLEAN     (1 << 16)
+#       define RADEON_WAIT_3D_IDLECLEAN     (1 << 17)
+#       define RADEON_WAIT_HOST_IDLECLEAN   (1 << 18)
+#       define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+#       define RADEON_CMDFIFO_ENTRIES_MASK  0x7f
+#       define RADEON_WAIT_VAP_IDLE         (1 << 28)
+#       define RADEON_WAIT_BOTH_CRTC_PFLIP  (1 << 30)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC0    (0 << 31)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC1    (1U << 31)
+
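+/*
+ * Usage sketch (not part of the original header): the WAIT_UNTIL bits are
+ * ORed into a single write that stalls the host interface, e.g. until the
+ * 2D and 3D engines are idle with clean caches.  wr32 is an assumed
+ * MMIO accessor.
+ */
+#if 0	/* illustrative only */
+static inline void
+radeon_wait_engines_idleclean(void (*wr32)(uint32_t reg, uint32_t val))
+{
+	wr32(RADEON_WAIT_UNTIL,
+	    RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN |
+	    RADEON_WAIT_HOST_IDLECLEAN);
+}
+#endif
+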
+#define RADEON_X_MPLL_REF_FB_DIV            0x000a /* PLL */
+#define RADEON_XCLK_CNTL                    0x000d /* PLL */
+#define RADEON_XDLL_CNTL                    0x000c /* PLL */
+#define RADEON_XPLL_CNTL                    0x000b /* PLL */
+
+
+
+				/* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0            0x1d40
+#define RADEON_PP_BORDER_COLOR_1            0x1d44
+#define RADEON_PP_BORDER_COLOR_2            0x1d48
+#define RADEON_PP_CNTL                      0x1c38
+#       define RADEON_STIPPLE_ENABLE        (1 <<  0)
+#       define RADEON_SCISSOR_ENABLE        (1 <<  1)
+#       define RADEON_PATTERN_ENABLE        (1 <<  2)
+#       define RADEON_SHADOW_ENABLE         (1 <<  3)
+#       define RADEON_TEX_ENABLE_MASK       (0xf << 4)
+#       define RADEON_TEX_0_ENABLE          (1 <<  4)
+#       define RADEON_TEX_1_ENABLE          (1 <<  5)
+#       define RADEON_TEX_2_ENABLE          (1 <<  6)
+#       define RADEON_TEX_3_ENABLE          (1 <<  7)
+#       define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+#       define RADEON_TEX_BLEND_0_ENABLE    (1 << 12)
+#       define RADEON_TEX_BLEND_1_ENABLE    (1 << 13)
+#       define RADEON_TEX_BLEND_2_ENABLE    (1 << 14)
+#       define RADEON_TEX_BLEND_3_ENABLE    (1 << 15)
+#       define RADEON_PLANAR_YUV_ENABLE     (1 << 20)
+#       define RADEON_SPECULAR_ENABLE       (1 << 21)
+#       define RADEON_FOG_ENABLE            (1 << 22)
+#       define RADEON_ALPHA_TEST_ENABLE     (1 << 23)
+#       define RADEON_ANTI_ALIAS_NONE       (0 << 24)
+#       define RADEON_ANTI_ALIAS_LINE       (1 << 24)
+#       define RADEON_ANTI_ALIAS_POLY       (2 << 24)
+#       define RADEON_ANTI_ALIAS_LINE_POLY  (3 << 24)
+#       define RADEON_BUMP_MAP_ENABLE       (1 << 26)
+#       define RADEON_BUMPED_MAP_T0         (0 << 27)
+#       define RADEON_BUMPED_MAP_T1         (1 << 27)
+#       define RADEON_BUMPED_MAP_T2         (2 << 27)
+#       define RADEON_TEX_3D_ENABLE_0       (1 << 29)
+#       define RADEON_TEX_3D_ENABLE_1       (1 << 30)
+#       define RADEON_MC_ENABLE             (1U << 31)
+#define RADEON_PP_FOG_COLOR                 0x1c18
+#       define RADEON_FOG_COLOR_MASK        0x00ffffff
+#       define RADEON_FOG_VERTEX            (0 << 24)
+#       define RADEON_FOG_TABLE             (1 << 24)
+#       define RADEON_FOG_USE_DEPTH         (0 << 25)
+#       define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+#       define RADEON_FOG_USE_SPEC_ALPHA    (3 << 25)
+#define RADEON_PP_LUM_MATRIX                0x1d00
+#define RADEON_PP_MISC                      0x1c14
+#       define RADEON_REF_ALPHA_MASK        0x000000ff
+#       define RADEON_ALPHA_TEST_FAIL       (0 << 8)
+#       define RADEON_ALPHA_TEST_LESS       (1 << 8)
+#       define RADEON_ALPHA_TEST_LEQUAL     (2 << 8)
+#       define RADEON_ALPHA_TEST_EQUAL      (3 << 8)
+#       define RADEON_ALPHA_TEST_GEQUAL     (4 << 8)
+#       define RADEON_ALPHA_TEST_GREATER    (5 << 8)
+#       define RADEON_ALPHA_TEST_NEQUAL     (6 << 8)
+#       define RADEON_ALPHA_TEST_PASS       (7 << 8)
+#       define RADEON_ALPHA_TEST_OP_MASK    (7 << 8)
+#       define RADEON_CHROMA_FUNC_FAIL      (0 << 16)
+#       define RADEON_CHROMA_FUNC_PASS      (1 << 16)
+#       define RADEON_CHROMA_FUNC_NEQUAL    (2 << 16)
+#       define RADEON_CHROMA_FUNC_EQUAL     (3 << 16)
+#       define RADEON_CHROMA_KEY_NEAREST    (0 << 18)
+#       define RADEON_CHROMA_KEY_ZERO       (1 << 18)
+#       define RADEON_SHADOW_ID_AUTO_INC    (1 << 20)
+#       define RADEON_SHADOW_FUNC_EQUAL     (0 << 21)
+#       define RADEON_SHADOW_FUNC_NEQUAL    (1 << 21)
+#       define RADEON_SHADOW_PASS_1         (0 << 22)
+#       define RADEON_SHADOW_PASS_2         (1 << 22)
+#       define RADEON_RIGHT_HAND_CUBE_D3D   (0 << 24)
+#       define RADEON_RIGHT_HAND_CUBE_OGL   (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0              0x1d58
+#define RADEON_PP_ROT_MATRIX_1              0x1d5c
+#define RADEON_PP_TXFILTER_0                0x1c54
+#define RADEON_PP_TXFILTER_1                0x1c6c
+#define RADEON_PP_TXFILTER_2                0x1c84
+#       define RADEON_MAG_FILTER_NEAREST                   (0  <<  0)
+#       define RADEON_MAG_FILTER_LINEAR                    (1  <<  0)
+#       define RADEON_MAG_FILTER_MASK                      (1  <<  0)
+#       define RADEON_MIN_FILTER_NEAREST                   (0  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR                    (1  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST       (2  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR        (3  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR         (7  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST             (8  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_LINEAR              (9  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (11 <<  1)
+#       define RADEON_MIN_FILTER_MASK                      (15 <<  1)
+#       define RADEON_MAX_ANISO_1_TO_1                     (0  <<  5)
+#       define RADEON_MAX_ANISO_2_TO_1                     (1  <<  5)
+#       define RADEON_MAX_ANISO_4_TO_1                     (2  <<  5)
+#       define RADEON_MAX_ANISO_8_TO_1                     (3  <<  5)
+#       define RADEON_MAX_ANISO_16_TO_1                    (4  <<  5)
+#       define RADEON_MAX_ANISO_MASK                       (7  <<  5)
+#       define RADEON_LOD_BIAS_MASK                        (0xff <<  8)
+#       define RADEON_LOD_BIAS_SHIFT                       8
+#       define RADEON_MAX_MIP_LEVEL_MASK                   (0x0f << 16)
+#       define RADEON_MAX_MIP_LEVEL_SHIFT                  16
+#       define RADEON_YUV_TO_RGB                           (1  << 20)
+#       define RADEON_YUV_TEMPERATURE_COOL                 (0  << 21)
+#       define RADEON_YUV_TEMPERATURE_HOT                  (1  << 21)
+#       define RADEON_YUV_TEMPERATURE_MASK                 (1  << 21)
+#       define RADEON_WRAPEN_S                             (1  << 22)
+#       define RADEON_CLAMP_S_WRAP                         (0  << 23)
+#       define RADEON_CLAMP_S_MIRROR                       (1  << 23)
+#       define RADEON_CLAMP_S_CLAMP_LAST                   (2  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_LAST            (3  << 23)
+#       define RADEON_CLAMP_S_CLAMP_BORDER                 (4  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER          (5  << 23)
+#       define RADEON_CLAMP_S_CLAMP_GL                     (6  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_GL              (7  << 23)
+#       define RADEON_CLAMP_S_MASK                         (7  << 23)
+#       define RADEON_WRAPEN_T                             (1  << 26)
+#       define RADEON_CLAMP_T_WRAP                         (0  << 27)
+#       define RADEON_CLAMP_T_MIRROR                       (1  << 27)
+#       define RADEON_CLAMP_T_CLAMP_LAST                   (2  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_LAST            (3  << 27)
+#       define RADEON_CLAMP_T_CLAMP_BORDER                 (4  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER          (5  << 27)
+#       define RADEON_CLAMP_T_CLAMP_GL                     (6  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_GL              (7  << 27)
+#       define RADEON_CLAMP_T_MASK                         (7  << 27)
+#       define RADEON_BORDER_MODE_OGL                      (0  << 31)
+#       define RADEON_BORDER_MODE_D3D                      (1U << 31)
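+/*
+ * Usage sketch (not part of the original header): a PP_TXFILTER word is
+ * composed by ORing one selection from each field above, e.g. trilinear
+ * filtering with clamp-to-last-texel on both axes.
+ */
+#if 0	/* illustrative only */
+static inline uint32_t
+radeon_txfilter_trilinear_clamp(void)
+{
+	return (RADEON_MAG_FILTER_LINEAR |
+	    RADEON_MIN_FILTER_LINEAR_MIP_LINEAR |
+	    RADEON_CLAMP_S_CLAMP_LAST |
+	    RADEON_CLAMP_T_CLAMP_LAST);
+}
+#endif
+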
+#define RADEON_PP_TXFORMAT_0                0x1c58
+#define RADEON_PP_TXFORMAT_1                0x1c70
+#define RADEON_PP_TXFORMAT_2                0x1c88
+#       define RADEON_TXFORMAT_I8                 (0  <<  0)
+#       define RADEON_TXFORMAT_AI88               (1  <<  0)
+#       define RADEON_TXFORMAT_RGB332             (2  <<  0)
+#       define RADEON_TXFORMAT_ARGB1555           (3  <<  0)
+#       define RADEON_TXFORMAT_RGB565             (4  <<  0)
+#       define RADEON_TXFORMAT_ARGB4444           (5  <<  0)
+#       define RADEON_TXFORMAT_ARGB8888           (6  <<  0)
+#       define RADEON_TXFORMAT_RGBA8888           (7  <<  0)
+#       define RADEON_TXFORMAT_Y8                 (8  <<  0)
+#       define RADEON_TXFORMAT_VYUY422            (10 <<  0)
+#       define RADEON_TXFORMAT_YVYU422            (11 <<  0)
+#       define RADEON_TXFORMAT_DXT1               (12 <<  0)
+#       define RADEON_TXFORMAT_DXT23              (14 <<  0)
+#       define RADEON_TXFORMAT_DXT45              (15 <<  0)
+#       define RADEON_TXFORMAT_SHADOW16           (16 <<  0)
+#       define RADEON_TXFORMAT_SHADOW32           (17 <<  0)
+#       define RADEON_TXFORMAT_DUDV88             (18 <<  0)
+#       define RADEON_TXFORMAT_LDUDV655           (19 <<  0)
+#       define RADEON_TXFORMAT_LDUDUV8888         (20 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_MASK        (31 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_SHIFT       0
+#       define RADEON_TXFORMAT_APPLE_YUV_MODE     (1  <<  5)
+#       define RADEON_TXFORMAT_ALPHA_IN_MAP       (1  <<  6)
+#       define RADEON_TXFORMAT_NON_POWER2         (1  <<  7)
+#       define RADEON_TXFORMAT_WIDTH_MASK         (15 <<  8)
+#       define RADEON_TXFORMAT_WIDTH_SHIFT        8
+#       define RADEON_TXFORMAT_HEIGHT_MASK        (15 << 12)
+#       define RADEON_TXFORMAT_HEIGHT_SHIFT       12
+#       define RADEON_TXFORMAT_F5_WIDTH_MASK      (15 << 16)
+#       define RADEON_TXFORMAT_F5_WIDTH_SHIFT     16
+#       define RADEON_TXFORMAT_F5_HEIGHT_MASK     (15 << 20)
+#       define RADEON_TXFORMAT_F5_HEIGHT_SHIFT    20
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ0      (0  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_MASK      (3  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ1      (1  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ2      (2  << 24)
+#       define RADEON_TXFORMAT_ENDIAN_NO_SWAP     (0  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP  (1  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP  (2  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3  << 26)
+#       define RADEON_TXFORMAT_ALPHA_MASK_ENABLE  (1  << 28)
+#       define RADEON_TXFORMAT_CHROMA_KEY_ENABLE  (1  << 29)
+#       define RADEON_TXFORMAT_CUBIC_MAP_ENABLE   (1  << 30)
+#       define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1U << 31)
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#       define RADEON_FACE_WIDTH_1_SHIFT          0
+#       define RADEON_FACE_HEIGHT_1_SHIFT         4
+#       define RADEON_FACE_WIDTH_1_MASK           (0xf << 0)
+#       define RADEON_FACE_HEIGHT_1_MASK          (0xf << 4)
+#       define RADEON_FACE_WIDTH_2_SHIFT          8
+#       define RADEON_FACE_HEIGHT_2_SHIFT         12
+#       define RADEON_FACE_WIDTH_2_MASK           (0xf << 8)
+#       define RADEON_FACE_HEIGHT_2_MASK          (0xf << 12)
+#       define RADEON_FACE_WIDTH_3_SHIFT          16
+#       define RADEON_FACE_HEIGHT_3_SHIFT         20
+#       define RADEON_FACE_WIDTH_3_MASK           (0xf << 16)
+#       define RADEON_FACE_HEIGHT_3_MASK          (0xf << 20)
+#       define RADEON_FACE_WIDTH_4_SHIFT          24
+#       define RADEON_FACE_HEIGHT_4_SHIFT         28
+#       define RADEON_FACE_WIDTH_4_MASK           (0xf << 24)
+#       define RADEON_FACE_HEIGHT_4_MASK          (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0                0x1c5c
+#define RADEON_PP_TXOFFSET_1                0x1c74
+#define RADEON_PP_TXOFFSET_2                0x1c8c
+#       define RADEON_TXO_ENDIAN_NO_SWAP     (0 << 0)
+#       define RADEON_TXO_ENDIAN_BYTE_SWAP   (1 << 0)
+#       define RADEON_TXO_ENDIAN_WORD_SWAP   (2 << 0)
+#       define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+#       define RADEON_TXO_MACRO_LINEAR       (0 << 2)
+#       define RADEON_TXO_MACRO_TILE         (1 << 2)
+#       define RADEON_TXO_MICRO_LINEAR       (0 << 3)
+#       define RADEON_TXO_MICRO_TILE_X2      (1 << 3)
+#       define RADEON_TXO_MICRO_TILE_OPT     (2 << 3)
+#       define RADEON_TXO_OFFSET_MASK        0xffffffe0
+#       define RADEON_TXO_OFFSET_SHIFT       5
+
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0  /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1         0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2         0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3         0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4         0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1         0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2         0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3         0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4         0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1         0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2         0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3         0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4         0x1e24
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04  /* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+#       define RADEON_TEX_USIZE_MASK        (0x7ff << 0)
+#       define RADEON_TEX_USIZE_SHIFT       0
+#       define RADEON_TEX_VSIZE_MASK        (0x7ff << 16)
+#       define RADEON_TEX_VSIZE_SHIFT       16
+#       define RADEON_SIGNED_RGB_MASK       (1 << 30)
+#       define RADEON_SIGNED_RGB_SHIFT      30
+#       define RADEON_SIGNED_ALPHA_MASK     (1U << 31)
+#       define RADEON_SIGNED_ALPHA_SHIFT    31
+#define RADEON_PP_TEX_PITCH_0               0x1d08  /* NPOT */
+#define RADEON_PP_TEX_PITCH_1               0x1d10  /* NPOT */
+#define RADEON_PP_TEX_PITCH_2               0x1d18  /* NPOT */
+/* note: bits 13-5: 32 byte aligned stride of texture map */
+
+#define RADEON_PP_TXCBLEND_0                0x1c60
+#define RADEON_PP_TXCBLEND_1                0x1c78
+#define RADEON_PP_TXCBLEND_2                0x1c90
+#       define RADEON_COLOR_ARG_A_SHIFT          0
+#       define RADEON_COLOR_ARG_A_MASK           (0x1f << 0)
+#       define RADEON_COLOR_ARG_A_ZERO           (0    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_COLOR  (2    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_ALPHA  (3    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_COLOR  (4    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA  (5    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_COLOR  (8    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_ALPHA  (9    << 0)
+#       define RADEON_COLOR_ARG_A_T0_COLOR       (10   << 0)
+#       define RADEON_COLOR_ARG_A_T0_ALPHA       (11   << 0)
+#       define RADEON_COLOR_ARG_A_T1_COLOR       (12   << 0)
+#       define RADEON_COLOR_ARG_A_T1_ALPHA       (13   << 0)
+#       define RADEON_COLOR_ARG_A_T2_COLOR       (14   << 0)
+#       define RADEON_COLOR_ARG_A_T2_ALPHA       (15   << 0)
+#       define RADEON_COLOR_ARG_A_T3_COLOR       (16   << 0)
+#       define RADEON_COLOR_ARG_A_T3_ALPHA       (17   << 0)
+#       define RADEON_COLOR_ARG_B_SHIFT          5
+#       define RADEON_COLOR_ARG_B_MASK           (0x1f << 5)
+#       define RADEON_COLOR_ARG_B_ZERO           (0    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_COLOR  (2    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_ALPHA  (3    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_COLOR  (4    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA  (5    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_COLOR  (8    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_ALPHA  (9    << 5)
+#       define RADEON_COLOR_ARG_B_T0_COLOR       (10   << 5)
+#       define RADEON_COLOR_ARG_B_T0_ALPHA       (11   << 5)
+#       define RADEON_COLOR_ARG_B_T1_COLOR       (12   << 5)
+#       define RADEON_COLOR_ARG_B_T1_ALPHA       (13   << 5)
+#       define RADEON_COLOR_ARG_B_T2_COLOR       (14   << 5)
+#       define RADEON_COLOR_ARG_B_T2_ALPHA       (15   << 5)
+#       define RADEON_COLOR_ARG_B_T3_COLOR       (16   << 5)
+#       define RADEON_COLOR_ARG_B_T3_ALPHA       (17   << 5)
+#       define RADEON_COLOR_ARG_C_SHIFT          10
+#       define RADEON_COLOR_ARG_C_MASK           (0x1f << 10)
+#       define RADEON_COLOR_ARG_C_ZERO           (0    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_COLOR  (2    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_ALPHA  (3    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_COLOR  (4    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA  (5    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_COLOR  (8    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_ALPHA  (9    << 10)
+#       define RADEON_COLOR_ARG_C_T0_COLOR       (10   << 10)
+#       define RADEON_COLOR_ARG_C_T0_ALPHA       (11   << 10)
+#       define RADEON_COLOR_ARG_C_T1_COLOR       (12   << 10)
+#       define RADEON_COLOR_ARG_C_T1_ALPHA       (13   << 10)
+#       define RADEON_COLOR_ARG_C_T2_COLOR       (14   << 10)
+#       define RADEON_COLOR_ARG_C_T2_ALPHA       (15   << 10)
+#       define RADEON_COLOR_ARG_C_T3_COLOR       (16   << 10)
+#       define RADEON_COLOR_ARG_C_T3_ALPHA       (17   << 10)
+#       define RADEON_COMP_ARG_A                 (1 << 15)
+#       define RADEON_COMP_ARG_A_SHIFT           15
+#       define RADEON_COMP_ARG_B                 (1 << 16)
+#       define RADEON_COMP_ARG_B_SHIFT           16
+#       define RADEON_COMP_ARG_C                 (1 << 17)
+#       define RADEON_COMP_ARG_C_SHIFT           17
+#       define RADEON_BLEND_CTL_MASK             (7 << 18)
+#       define RADEON_BLEND_CTL_ADD              (0 << 18)
+#       define RADEON_BLEND_CTL_SUBTRACT         (1 << 18)
+#       define RADEON_BLEND_CTL_ADDSIGNED        (2 << 18)
+#       define RADEON_BLEND_CTL_BLEND            (3 << 18)
+#       define RADEON_BLEND_CTL_DOT3             (4 << 18)
+#       define RADEON_SCALE_SHIFT                21
+#       define RADEON_SCALE_MASK                 (3 << 21)
+#       define RADEON_SCALE_1X                   (0 << 21)
+#       define RADEON_SCALE_2X                   (1 << 21)
+#       define RADEON_SCALE_4X                   (2 << 21)
+#       define RADEON_CLAMP_TX                   (1 << 23)
+#       define RADEON_T0_EQ_TCUR                 (1 << 24)
+#       define RADEON_T1_EQ_TCUR                 (1 << 25)
+#       define RADEON_T2_EQ_TCUR                 (1 << 26)
+#       define RADEON_T3_EQ_TCUR                 (1 << 27)
+#       define RADEON_COLOR_ARG_MASK             0x1f
+#       define RADEON_COMP_ARG_SHIFT             15
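+/*
+ * Usage sketch (not part of the original header): each combiner argument is
+ * a 5-bit select field; the usual pattern is mask-out then shift-in, as in
+ * this hypothetical helper for argument A.
+ */
+#if 0	/* illustrative only */
+static inline uint32_t
+radeon_txcblend_set_arg_a(uint32_t blend, uint32_t sel)
+{
+	/* Clear the old argument-A select, then insert the new one. */
+	return ((blend & ~RADEON_COLOR_ARG_A_MASK) |
+	    ((sel << RADEON_COLOR_ARG_A_SHIFT) & RADEON_COLOR_ARG_A_MASK));
+}
+#endif
+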
+#define RADEON_PP_TXABLEND_0                0x1c64
+#define RADEON_PP_TXABLEND_1                0x1c7c
+#define RADEON_PP_TXABLEND_2                0x1c94
+#       define RADEON_ALPHA_ARG_A_SHIFT          0
+#       define RADEON_ALPHA_ARG_A_MASK           (0xf << 0)
+#       define RADEON_ALPHA_ARG_A_ZERO           (0   << 0)
+#       define RADEON_ALPHA_ARG_A_CURRENT_ALPHA  (1   << 0)
+#       define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA  (2   << 0)
+#       define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3   << 0)
+#       define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA  (4   << 0)
+#       define RADEON_ALPHA_ARG_A_T0_ALPHA       (5   << 0)
+#       define RADEON_ALPHA_ARG_A_T1_ALPHA       (6   << 0)
+#       define RADEON_ALPHA_ARG_A_T2_ALPHA       (7   << 0)
+#       define RADEON_ALPHA_ARG_A_T3_ALPHA       (8   << 0)
+#       define RADEON_ALPHA_ARG_B_SHIFT          4
+#       define RADEON_ALPHA_ARG_B_MASK           (0xf << 4)
+#       define RADEON_ALPHA_ARG_B_ZERO           (0   << 4)
+#       define RADEON_ALPHA_ARG_B_CURRENT_ALPHA  (1   << 4)
+#       define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA  (2   << 4)
+#       define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3   << 4)
+#       define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA  (4   << 4)
+#       define RADEON_ALPHA_ARG_B_T0_ALPHA       (5   << 4)
+#       define RADEON_ALPHA_ARG_B_T1_ALPHA       (6   << 4)
+#       define RADEON_ALPHA_ARG_B_T2_ALPHA       (7   << 4)
+#       define RADEON_ALPHA_ARG_B_T3_ALPHA       (8   << 4)
+#       define RADEON_ALPHA_ARG_C_SHIFT          8
+#       define RADEON_ALPHA_ARG_C_MASK           (0xf << 8)
+#       define RADEON_ALPHA_ARG_C_ZERO           (0   << 8)
+#       define RADEON_ALPHA_ARG_C_CURRENT_ALPHA  (1   << 8)
+#       define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA  (2   << 8)
+#       define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3   << 8)
+#       define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA  (4   << 8)
+#       define RADEON_ALPHA_ARG_C_T0_ALPHA       (5   << 8)
+#       define RADEON_ALPHA_ARG_C_T1_ALPHA       (6   << 8)
+#       define RADEON_ALPHA_ARG_C_T2_ALPHA       (7   << 8)
+#       define RADEON_ALPHA_ARG_C_T3_ALPHA       (8   << 8)
+#       define RADEON_DOT_ALPHA_DONT_REPLICATE   (1   << 9)
+#       define RADEON_ALPHA_ARG_MASK             0xf
+
+#define RADEON_PP_TFACTOR_0                 0x1c68
+#define RADEON_PP_TFACTOR_1                 0x1c80
+#define RADEON_PP_TFACTOR_2                 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL               0x1c20
+#       define RADEON_COMB_FCN_MASK                    (3  << 12)
+#       define RADEON_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define RADEON_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define RADEON_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define RADEON_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define RADEON_SRC_BLEND_GL_ZERO                (32 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE                 (33 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_COLOR           (36 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
+#       define RADEON_SRC_BLEND_MASK                   (63 << 16)
+#       define RADEON_DST_BLEND_GL_ZERO                (32 << 24)
+#       define RADEON_DST_BLEND_GL_ONE                 (33 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_COLOR           (34 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#       define RADEON_DST_BLEND_GL_DST_COLOR           (36 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#       define RADEON_DST_BLEND_GL_DST_ALPHA           (40 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#       define RADEON_DST_BLEND_MASK                   (63 << 24)
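+/*
+ * Usage sketch (not part of the original header): the classic GL-style
+ * source-alpha blend is one value from each of the three fields above.
+ */
+#if 0	/* illustrative only */
+static inline uint32_t
+radeon_blendcntl_src_alpha(void)
+{
+	/* result = src*alpha + dst*(1 - alpha), clamped */
+	return (RADEON_COMB_FCN_ADD_CLAMP |
+	    RADEON_SRC_BLEND_GL_SRC_ALPHA |
+	    RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA);
+}
+#endif
+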
+#define RADEON_RB3D_CNTL                    0x1c3c
+#       define RADEON_ALPHA_BLEND_ENABLE       (1  <<  0)
+#       define RADEON_PLANE_MASK_ENABLE        (1  <<  1)
+#       define RADEON_DITHER_ENABLE            (1  <<  2)
+#       define RADEON_ROUND_ENABLE             (1  <<  3)
+#       define RADEON_SCALE_DITHER_ENABLE      (1  <<  4)
+#       define RADEON_DITHER_INIT              (1  <<  5)
+#       define RADEON_ROP_ENABLE               (1  <<  6)
+#       define RADEON_STENCIL_ENABLE           (1  <<  7)
+#       define RADEON_Z_ENABLE                 (1  <<  8)
+#       define RADEON_DEPTHXY_OFFSET_ENABLE    (1  <<  9)
+#       define RADEON_RB3D_COLOR_FORMAT_SHIFT  10
+
+#       define RADEON_COLOR_FORMAT_ARGB1555    3
+#       define RADEON_COLOR_FORMAT_RGB565      4
+#       define RADEON_COLOR_FORMAT_ARGB8888    6
+#       define RADEON_COLOR_FORMAT_RGB332      7
+#       define RADEON_COLOR_FORMAT_Y8          8
+#       define RADEON_COLOR_FORMAT_RGB8        9
+#       define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+#       define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+#       define RADEON_COLOR_FORMAT_aYUV444     14
+#       define RADEON_COLOR_FORMAT_ARGB4444    15
+
+#       define RADEON_CLRCMP_FLIP_ENABLE       (1  << 14)
+#define RADEON_RB3D_COLOROFFSET             0x1c40
+#       define RADEON_COLOROFFSET_MASK      0xfffffff0
+#define RADEON_RB3D_COLORPITCH              0x1c48
+#       define RADEON_COLORPITCH_MASK         0x00001ff8
+#       define RADEON_COLOR_TILE_ENABLE       (1 << 16)
+#       define RADEON_COLOR_MICROTILE_ENABLE  (1 << 17)
+#       define RADEON_COLOR_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_COLOR_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET             0x1c24
+#define RADEON_RB3D_DEPTHPITCH              0x1c28
+#       define RADEON_DEPTHPITCH_MASK         0x00001ff8
+#       define RADEON_DEPTH_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_DEPTH_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK               0x1d84
+#define RADEON_RB3D_ROPCNTL                 0x1d80
+#       define RADEON_ROP_MASK              (15 << 8)
+#       define RADEON_ROP_CLEAR             (0  << 8)
+#       define RADEON_ROP_NOR               (1  << 8)
+#       define RADEON_ROP_AND_INVERTED      (2  << 8)
+#       define RADEON_ROP_COPY_INVERTED     (3  << 8)
+#       define RADEON_ROP_AND_REVERSE       (4  << 8)
+#       define RADEON_ROP_INVERT            (5  << 8)
+#       define RADEON_ROP_XOR               (6  << 8)
+#       define RADEON_ROP_NAND              (7  << 8)
+#       define RADEON_ROP_AND               (8  << 8)
+#       define RADEON_ROP_EQUIV             (9  << 8)
+#       define RADEON_ROP_NOOP              (10 << 8)
+#       define RADEON_ROP_OR_INVERTED       (11 << 8)
+#       define RADEON_ROP_COPY              (12 << 8)
+#       define RADEON_ROP_OR_REVERSE        (13 << 8)
+#       define RADEON_ROP_OR                (14 << 8)
+#       define RADEON_ROP_SET               (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK          0x1d7c
+#       define RADEON_STENCIL_REF_SHIFT       0
+#       define RADEON_STENCIL_REF_MASK        (0xff << 0)
+#       define RADEON_STENCIL_MASK_SHIFT      16
+#       define RADEON_STENCIL_VALUE_MASK      (0xff << 16)
+#       define RADEON_STENCIL_WRITEMASK_SHIFT 24
+#       define RADEON_STENCIL_WRITE_MASK      (0xff << 24)
+#define RADEON_RB3D_ZSTENCILCNTL            0x1c2c
+#       define RADEON_DEPTH_FORMAT_MASK          (0xf << 0)
+#       define RADEON_DEPTH_FORMAT_16BIT_INT_Z   (0  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_INT_Z   (2  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_INT_Z   (4  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5  <<  0)
+#       define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 <<  0)
+#       define RADEON_Z_TEST_NEVER               (0  <<  4)
+#       define RADEON_Z_TEST_LESS                (1  <<  4)
+#       define RADEON_Z_TEST_LEQUAL              (2  <<  4)
+#       define RADEON_Z_TEST_EQUAL               (3  <<  4)
+#       define RADEON_Z_TEST_GEQUAL              (4  <<  4)
+#       define RADEON_Z_TEST_GREATER             (5  <<  4)
+#       define RADEON_Z_TEST_NEQUAL              (6  <<  4)
+#       define RADEON_Z_TEST_ALWAYS              (7  <<  4)
+#       define RADEON_Z_TEST_MASK                (7  <<  4)
+#       define RADEON_STENCIL_TEST_NEVER         (0  << 12)
+#       define RADEON_STENCIL_TEST_LESS          (1  << 12)
+#       define RADEON_STENCIL_TEST_LEQUAL        (2  << 12)
+#       define RADEON_STENCIL_TEST_EQUAL         (3  << 12)
+#       define RADEON_STENCIL_TEST_GEQUAL        (4  << 12)
+#       define RADEON_STENCIL_TEST_GREATER       (5  << 12)
+#       define RADEON_STENCIL_TEST_NEQUAL        (6  << 12)
+#       define RADEON_STENCIL_TEST_ALWAYS        (7  << 12)
+#       define RADEON_STENCIL_TEST_MASK          (0x7 << 12)
+#       define RADEON_STENCIL_FAIL_KEEP          (0  << 16)
+#       define RADEON_STENCIL_FAIL_ZERO          (1  << 16)
+#       define RADEON_STENCIL_FAIL_REPLACE       (2  << 16)
+#       define RADEON_STENCIL_FAIL_INC           (3  << 16)
+#       define RADEON_STENCIL_FAIL_DEC           (4  << 16)
+#       define RADEON_STENCIL_FAIL_INVERT        (5  << 16)
+#       define RADEON_STENCIL_FAIL_MASK          (0x7 << 16)
+#       define RADEON_STENCIL_ZPASS_KEEP         (0  << 20)
+#       define RADEON_STENCIL_ZPASS_ZERO         (1  << 20)
+#       define RADEON_STENCIL_ZPASS_REPLACE      (2  << 20)
+#       define RADEON_STENCIL_ZPASS_INC          (3  << 20)
+#       define RADEON_STENCIL_ZPASS_DEC          (4  << 20)
+#       define RADEON_STENCIL_ZPASS_INVERT       (5  << 20)
+#       define RADEON_STENCIL_ZPASS_MASK         (0x7 << 20)
+#       define RADEON_STENCIL_ZFAIL_KEEP         (0  << 24)
+#       define RADEON_STENCIL_ZFAIL_ZERO         (1  << 24)
+#       define RADEON_STENCIL_ZFAIL_REPLACE      (2  << 24)
+#       define RADEON_STENCIL_ZFAIL_INC          (3  << 24)
+#       define RADEON_STENCIL_ZFAIL_DEC          (4  << 24)
+#       define RADEON_STENCIL_ZFAIL_INVERT       (5  << 24)
+#       define RADEON_STENCIL_ZFAIL_MASK         (0x7 << 24)
+#       define RADEON_Z_COMPRESSION_ENABLE       (1  << 28)
+#       define RADEON_FORCE_Z_DIRTY              (1  << 29)
+#       define RADEON_Z_WRITE_ENABLE             (1  << 30)
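+
+/*
+ * Illustrative sketch, not part of the original header: RADEON_Z_TEST_MASK
+ * lets a driver swap only the Z compare function (e.g. RADEON_Z_TEST_LEQUAL)
+ * in a read-modify-write of RB3D_ZSTENCILCNTL, leaving depth format, stencil
+ * ops and write enable untouched (example-only, hence #if 0).
+ */
+#if 0
+static __inline uint32_t
+radeon_set_z_test(uint32_t zcntl, uint32_t ztest)
+{
+	return ((zcntl & ~RADEON_Z_TEST_MASK) | (ztest & RADEON_Z_TEST_MASK));
+}
+#endif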
+#define RADEON_RE_LINE_PATTERN              0x1cd0
+#       define RADEON_LINE_PATTERN_MASK             0x0000ffff
+#       define RADEON_LINE_REPEAT_COUNT_SHIFT       16
+#       define RADEON_LINE_PATTERN_START_SHIFT      24
+#       define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+#       define RADEON_LINE_PATTERN_BIG_BIT_ORDER    (1 << 28)
+#       define RADEON_LINE_PATTERN_AUTO_RESET       (1 << 29)
+#define RADEON_RE_LINE_STATE                0x1cd4
+#       define RADEON_LINE_CURRENT_PTR_SHIFT   0
+#       define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC                      0x26c4
+#       define RADEON_STIPPLE_COORD_MASK       0x1f
+#       define RADEON_STIPPLE_X_OFFSET_SHIFT   0
+#       define RADEON_STIPPLE_X_OFFSET_MASK    (0x1f << 0)
+#       define RADEON_STIPPLE_Y_OFFSET_SHIFT   8
+#       define RADEON_STIPPLE_Y_OFFSET_MASK    (0x1f << 8)
+#       define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+#       define RADEON_STIPPLE_BIG_BIT_ORDER    (1 << 16)
+#define RADEON_RE_SOLID_COLOR               0x1c1c
+#define RADEON_RE_TOP_LEFT                  0x26c0
+#       define RADEON_RE_LEFT_SHIFT         0
+#       define RADEON_RE_TOP_SHIFT          16
+#define RADEON_RE_WIDTH_HEIGHT              0x1c44
+#       define RADEON_RE_WIDTH_SHIFT        0
+#       define RADEON_RE_HEIGHT_SHIFT       16
+
+#define RADEON_RB3D_ZPASS_DATA 0x3290
+#define RADEON_RB3D_ZPASS_ADDR 0x3294
+
+#define RADEON_SE_CNTL                      0x1c4c
+#       define RADEON_FFACE_CULL_CW          (0 <<  0)
+#       define RADEON_FFACE_CULL_CCW         (1 <<  0)
+#       define RADEON_FFACE_CULL_DIR_MASK    (1 <<  0)
+#       define RADEON_BFACE_CULL             (0 <<  1)
+#       define RADEON_BFACE_SOLID            (3 <<  1)
+#       define RADEON_FFACE_CULL             (0 <<  3)
+#       define RADEON_FFACE_SOLID            (3 <<  3)
+#       define RADEON_FFACE_CULL_MASK        (3 <<  3)
+#       define RADEON_BADVTX_CULL_DISABLE    (1 <<  5)
+#       define RADEON_FLAT_SHADE_VTX_0       (0 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_1       (1 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_2       (2 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_LAST    (3 <<  6)
+#       define RADEON_DIFFUSE_SHADE_SOLID    (0 <<  8)
+#       define RADEON_DIFFUSE_SHADE_FLAT     (1 <<  8)
+#       define RADEON_DIFFUSE_SHADE_GOURAUD  (2 <<  8)
+#       define RADEON_DIFFUSE_SHADE_MASK     (3 <<  8)
+#       define RADEON_ALPHA_SHADE_SOLID      (0 << 10)
+#       define RADEON_ALPHA_SHADE_FLAT       (1 << 10)
+#       define RADEON_ALPHA_SHADE_GOURAUD    (2 << 10)
+#       define RADEON_ALPHA_SHADE_MASK       (3 << 10)
+#       define RADEON_SPECULAR_SHADE_SOLID   (0 << 12)
+#       define RADEON_SPECULAR_SHADE_FLAT    (1 << 12)
+#       define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+#       define RADEON_SPECULAR_SHADE_MASK    (3 << 12)
+#       define RADEON_FOG_SHADE_SOLID        (0 << 14)
+#       define RADEON_FOG_SHADE_FLAT         (1 << 14)
+#       define RADEON_FOG_SHADE_GOURAUD      (2 << 14)
+#       define RADEON_FOG_SHADE_MASK         (3 << 14)
+#       define RADEON_ZBIAS_ENABLE_POINT     (1 << 16)
+#       define RADEON_ZBIAS_ENABLE_LINE      (1 << 17)
+#       define RADEON_ZBIAS_ENABLE_TRI       (1 << 18)
+#       define RADEON_WIDELINE_ENABLE        (1 << 20)
+#       define RADEON_VPORT_XY_XFORM_ENABLE  (1 << 24)
+#       define RADEON_VPORT_Z_XFORM_ENABLE   (1 << 25)
+#       define RADEON_VTX_PIX_CENTER_D3D     (0 << 27)
+#       define RADEON_VTX_PIX_CENTER_OGL     (1 << 27)
+#       define RADEON_ROUND_MODE_TRUNC       (0 << 28)
+#       define RADEON_ROUND_MODE_ROUND       (1 << 28)
+#       define RADEON_ROUND_MODE_ROUND_EVEN  (2 << 28)
+#       define RADEON_ROUND_MODE_ROUND_ODD   (3 << 28)
+#       define RADEON_ROUND_PREC_16TH_PIX    (0 << 30)
+#       define RADEON_ROUND_PREC_8TH_PIX     (1 << 30)
+#       define RADEON_ROUND_PREC_4TH_PIX     (2U << 30)
+#       define RADEON_ROUND_PREC_HALF_PIX    (3U << 30)
+#define R200_RE_CNTL				0x1c50
+#       define R200_STIPPLE_ENABLE		0x1
+#       define R200_SCISSOR_ENABLE		0x2
+#       define R200_PATTERN_ENABLE		0x4
+#       define R200_PERSPECTIVE_ENABLE		0x8
+#       define R200_POINT_SMOOTH		0x20
+#       define R200_VTX_STQ0_D3D		0x00010000
+#       define R200_VTX_STQ1_D3D		0x00040000
+#       define R200_VTX_STQ2_D3D		0x00100000
+#       define R200_VTX_STQ3_D3D		0x00400000
+#       define R200_VTX_STQ4_D3D		0x01000000
+#       define R200_VTX_STQ5_D3D		0x04000000
+#define RADEON_SE_CNTL_STATUS               0x2140
+#       define RADEON_VC_NO_SWAP            (0 << 0)
+#       define RADEON_VC_16BIT_SWAP         (1 << 0)
+#       define RADEON_VC_32BIT_SWAP         (2 << 0)
+#       define RADEON_VC_HALF_DWORD_SWAP    (3 << 0)
+#       define RADEON_TCL_BYPASS            (1 << 8)
+#define RADEON_SE_COORD_FMT                 0x1c50
+#       define RADEON_VTX_XY_PRE_MULT_1_OVER_W0  (1 <<  0)
+#       define RADEON_VTX_Z_PRE_MULT_1_OVER_W0   (1 <<  1)
+#       define RADEON_VTX_ST0_NONPARAMETRIC      (1 <<  8)
+#       define RADEON_VTX_ST1_NONPARAMETRIC      (1 <<  9)
+#       define RADEON_VTX_ST2_NONPARAMETRIC      (1 << 10)
+#       define RADEON_VTX_ST3_NONPARAMETRIC      (1 << 11)
+#       define RADEON_VTX_W0_NORMALIZE           (1 << 12)
+#       define RADEON_VTX_W0_IS_NOT_1_OVER_W0    (1 << 16)
+#       define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+#       define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+#       define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+#       define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+#       define RADEON_TEX1_W_ROUTING_USE_W0      (0 << 26)
+#       define RADEON_TEX1_W_ROUTING_USE_Q1      (1 << 26)
+#define RADEON_SE_LINE_WIDTH                0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL       0x226c
+#       define RADEON_LIGHTING_ENABLE              (1 << 0)
+#       define RADEON_LIGHT_IN_MODELSPACE          (1 << 1)
+#       define RADEON_LOCAL_VIEWER                 (1 << 2)
+#       define RADEON_NORMALIZE_NORMALS            (1 << 3)
+#       define RADEON_RESCALE_NORMALS              (1 << 4)
+#       define RADEON_SPECULAR_LIGHTS              (1 << 5)
+#       define RADEON_DIFFUSE_SPECULAR_COMBINE     (1 << 6)
+#       define RADEON_LIGHT_ALPHA                  (1 << 7)
+#       define RADEON_LOCAL_LIGHT_VEC_GL           (1 << 8)
+#       define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+#       define RADEON_LM_SOURCE_STATE_PREMULT      0
+#       define RADEON_LM_SOURCE_STATE_MULT         1
+#       define RADEON_LM_SOURCE_VERTEX_DIFFUSE     2
+#       define RADEON_LM_SOURCE_VERTEX_SPECULAR    3
+#       define RADEON_EMISSIVE_SOURCE_SHIFT        16
+#       define RADEON_AMBIENT_SOURCE_SHIFT         18
+#       define RADEON_DIFFUSE_SOURCE_SHIFT         20
+#       define RADEON_SPECULAR_SOURCE_SHIFT        22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED     0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN   0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE    0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA   0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED     0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN   0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE    0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA   0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED   0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE  0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED    0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN  0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE   0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA  0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0       0x225c
+#       define RADEON_MODELVIEW_0_SHIFT        0
+#       define RADEON_MODELVIEW_1_SHIFT        4
+#       define RADEON_MODELVIEW_2_SHIFT        8
+#       define RADEON_MODELVIEW_3_SHIFT        12
+#       define RADEON_IT_MODELVIEW_0_SHIFT     16
+#       define RADEON_IT_MODELVIEW_1_SHIFT     20
+#       define RADEON_IT_MODELVIEW_2_SHIFT     24
+#       define RADEON_IT_MODELVIEW_3_SHIFT     28
+#define RADEON_SE_TCL_MATRIX_SELECT_1       0x2260
+#       define RADEON_MODELPROJECT_0_SHIFT     0
+#       define RADEON_MODELPROJECT_1_SHIFT     4
+#       define RADEON_MODELPROJECT_2_SHIFT     8
+#       define RADEON_MODELPROJECT_3_SHIFT     12
+#       define RADEON_TEXMAT_0_SHIFT           16
+#       define RADEON_TEXMAT_1_SHIFT           20
+#       define RADEON_TEXMAT_2_SHIFT           24
+#       define RADEON_TEXMAT_3_SHIFT           28
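+
+/*
+ * Illustrative sketch, not part of the original header: each 4-bit field of
+ * the MATRIX_SELECT registers names the hardware matrix slot feeding one
+ * transform stage, so routing (hypothetical) slots 2 and 3 to modelview 0
+ * and its inverse-transpose looks like this (example-only, hence #if 0).
+ */
+#if 0
+uint32_t matrix_sel_0 = (2 << RADEON_MODELVIEW_0_SHIFT) |
+    (3 << RADEON_IT_MODELVIEW_0_SHIFT);
+#endif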
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT        0x2254
+#       define RADEON_TCL_VTX_W0                 (1 <<  0)
+#       define RADEON_TCL_VTX_FP_DIFFUSE         (1 <<  1)
+#       define RADEON_TCL_VTX_FP_ALPHA           (1 <<  2)
+#       define RADEON_TCL_VTX_PK_DIFFUSE         (1 <<  3)
+#       define RADEON_TCL_VTX_FP_SPEC            (1 <<  4)
+#       define RADEON_TCL_VTX_FP_FOG             (1 <<  5)
+#       define RADEON_TCL_VTX_PK_SPEC            (1 <<  6)
+#       define RADEON_TCL_VTX_ST0                (1 <<  7)
+#       define RADEON_TCL_VTX_ST1                (1 <<  8)
+#       define RADEON_TCL_VTX_Q1                 (1 <<  9)
+#       define RADEON_TCL_VTX_ST2                (1 << 10)
+#       define RADEON_TCL_VTX_Q2                 (1 << 11)
+#       define RADEON_TCL_VTX_ST3                (1 << 12)
+#       define RADEON_TCL_VTX_Q3                 (1 << 13)
+#       define RADEON_TCL_VTX_Q0                 (1 << 14)
+#       define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+#       define RADEON_TCL_VTX_NORM0              (1 << 18)
+#       define RADEON_TCL_VTX_XY1                (1 << 27)
+#       define RADEON_TCL_VTX_Z1                 (1 << 28)
+#       define RADEON_TCL_VTX_W1                 (1 << 29)
+#       define RADEON_TCL_VTX_NORM1              (1 << 30)
+#       define RADEON_TCL_VTX_Z0                 (1U << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL        0x2258
+#       define RADEON_TCL_COMPUTE_XYZW           (1 << 0)
+#       define RADEON_TCL_COMPUTE_DIFFUSE        (1 << 1)
+#       define RADEON_TCL_COMPUTE_SPECULAR       (1 << 2)
+#       define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+#       define RADEON_TCL_FORCE_INORDER_PROC     (1 << 4)
+#       define RADEON_TCL_TEX_INPUT_TEX_0        0
+#       define RADEON_TCL_TEX_INPUT_TEX_1        1
+#       define RADEON_TCL_TEX_INPUT_TEX_2        2
+#       define RADEON_TCL_TEX_INPUT_TEX_3        3
+#       define RADEON_TCL_TEX_COMPUTED_TEX_0     8
+#       define RADEON_TCL_TEX_COMPUTED_TEX_1     9
+#       define RADEON_TCL_TEX_COMPUTED_TEX_2     10
+#       define RADEON_TCL_TEX_COMPUTED_TEX_3     11
+#       define RADEON_TCL_TEX_0_OUTPUT_SHIFT     16
+#       define RADEON_TCL_TEX_1_OUTPUT_SHIFT     20
+#       define RADEON_TCL_TEX_2_OUTPUT_SHIFT     24
+#       define RADEON_TCL_TEX_3_OUTPUT_SHIFT     28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0       0x2270
+#       define RADEON_LIGHT_0_ENABLE               (1 <<  0)
+#       define RADEON_LIGHT_0_ENABLE_AMBIENT       (1 <<  1)
+#       define RADEON_LIGHT_0_ENABLE_SPECULAR      (1 <<  2)
+#       define RADEON_LIGHT_0_IS_LOCAL             (1 <<  3)
+#       define RADEON_LIGHT_0_IS_SPOT              (1 <<  4)
+#       define RADEON_LIGHT_0_DUAL_CONE            (1 <<  5)
+#       define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN   (1 <<  6)
+#       define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 <<  7)
+#       define RADEON_LIGHT_0_SHIFT                0
+#       define RADEON_LIGHT_1_ENABLE               (1 << 16)
+#       define RADEON_LIGHT_1_ENABLE_AMBIENT       (1 << 17)
+#       define RADEON_LIGHT_1_ENABLE_SPECULAR      (1 << 18)
+#       define RADEON_LIGHT_1_IS_LOCAL             (1 << 19)
+#       define RADEON_LIGHT_1_IS_SPOT              (1 << 20)
+#       define RADEON_LIGHT_1_DUAL_CONE            (1 << 21)
+#       define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN   (1 << 22)
+#       define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+#       define RADEON_LIGHT_1_SHIFT                16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1       0x2274
+#       define RADEON_LIGHT_2_SHIFT            0
+#       define RADEON_LIGHT_3_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2       0x2278
+#       define RADEON_LIGHT_4_SHIFT            0
+#       define RADEON_LIGHT_5_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3       0x227c
+#       define RADEON_LIGHT_6_SHIFT            0
+#       define RADEON_LIGHT_7_SHIFT            16
+
+#define RADEON_SE_TCL_SHININESS             0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL      0x2268
+#       define RADEON_TEXGEN_TEXMAT_0_ENABLE      (1 << 0)
+#       define RADEON_TEXGEN_TEXMAT_1_ENABLE      (1 << 1)
+#       define RADEON_TEXGEN_TEXMAT_2_ENABLE      (1 << 2)
+#       define RADEON_TEXGEN_TEXMAT_3_ENABLE      (1 << 3)
+#       define RADEON_TEXMAT_0_ENABLE             (1 << 4)
+#       define RADEON_TEXMAT_1_ENABLE             (1 << 5)
+#       define RADEON_TEXMAT_2_ENABLE             (1 << 6)
+#       define RADEON_TEXMAT_3_ENABLE             (1 << 7)
+#       define RADEON_TEXGEN_INPUT_MASK           0xf
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_0     0
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_1     1
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_2     2
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_3     3
+#       define RADEON_TEXGEN_INPUT_OBJ            4
+#       define RADEON_TEXGEN_INPUT_EYE            5
+#       define RADEON_TEXGEN_INPUT_EYE_NORMAL     6
+#       define RADEON_TEXGEN_INPUT_EYE_REFLECT    7
+#       define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+#       define RADEON_TEXGEN_0_INPUT_SHIFT        16
+#       define RADEON_TEXGEN_1_INPUT_SHIFT        20
+#       define RADEON_TEXGEN_2_INPUT_SHIFT        24
+#       define RADEON_TEXGEN_3_INPUT_SHIFT        28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL    0x2264
+#       define RADEON_UCP_IN_CLIP_SPACE            (1 <<  0)
+#       define RADEON_UCP_IN_MODEL_SPACE           (1 <<  1)
+#       define RADEON_UCP_ENABLE_0                 (1 <<  2)
+#       define RADEON_UCP_ENABLE_1                 (1 <<  3)
+#       define RADEON_UCP_ENABLE_2                 (1 <<  4)
+#       define RADEON_UCP_ENABLE_3                 (1 <<  5)
+#       define RADEON_UCP_ENABLE_4                 (1 <<  6)
+#       define RADEON_UCP_ENABLE_5                 (1 <<  7)
+#       define RADEON_TCL_FOG_MASK                 (3 <<  8)
+#       define RADEON_TCL_FOG_DISABLE              (0 <<  8)
+#       define RADEON_TCL_FOG_EXP                  (1 <<  8)
+#       define RADEON_TCL_FOG_EXP2                 (2 <<  8)
+#       define RADEON_TCL_FOG_LINEAR               (3 <<  8)
+#       define RADEON_RNG_BASED_FOG                (1 << 10)
+#       define RADEON_LIGHT_TWOSIDE                (1 << 11)
+#       define RADEON_BLEND_OP_COUNT_MASK          (7 << 12)
+#       define RADEON_BLEND_OP_COUNT_SHIFT         12
+#       define RADEON_POSITION_BLEND_OP_ENABLE     (1 << 16)
+#       define RADEON_NORMAL_BLEND_OP_ENABLE       (1 << 17)
+#       define RADEON_VERTEX_BLEND_SRC_0_PRIMARY   (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_1_PRIMARY   (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_2_PRIMARY   (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_3_PRIMARY   (1 << 21)
+#       define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+#       define RADEON_VERTEX_BLEND_WGT_MINUS_ONE   (1 << 22)
+#       define RADEON_CULL_FRONT_IS_CW             (0 << 28)
+#       define RADEON_CULL_FRONT_IS_CCW            (1 << 28)
+#       define RADEON_CULL_FRONT                   (1 << 29)
+#       define RADEON_CULL_BACK                    (1 << 30)
+#       define RADEON_FORCE_W_TO_ONE               (1U << 31)
+
+#define RADEON_SE_VPORT_XSCALE              0x1d98
+#define RADEON_SE_VPORT_XOFFSET             0x1d9c
+#define RADEON_SE_VPORT_YSCALE              0x1da0
+#define RADEON_SE_VPORT_YOFFSET             0x1da4
+#define RADEON_SE_VPORT_ZSCALE              0x1da8
+#define RADEON_SE_VPORT_ZOFFSET             0x1dac
+#define RADEON_SE_ZBIAS_FACTOR              0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT            0x1db4
+
+#define RADEON_SE_VTX_FMT                   0x2080
+#       define RADEON_SE_VTX_FMT_XY         0x00000000
+#       define RADEON_SE_VTX_FMT_W0         0x00000001
+#       define RADEON_SE_VTX_FMT_FPCOLOR    0x00000002
+#       define RADEON_SE_VTX_FMT_FPALPHA    0x00000004
+#       define RADEON_SE_VTX_FMT_PKCOLOR    0x00000008
+#       define RADEON_SE_VTX_FMT_FPSPEC     0x00000010
+#       define RADEON_SE_VTX_FMT_FPFOG      0x00000020
+#       define RADEON_SE_VTX_FMT_PKSPEC     0x00000040
+#       define RADEON_SE_VTX_FMT_ST0        0x00000080
+#       define RADEON_SE_VTX_FMT_ST1        0x00000100
+#       define RADEON_SE_VTX_FMT_Q1         0x00000200
+#       define RADEON_SE_VTX_FMT_ST2        0x00000400
+#       define RADEON_SE_VTX_FMT_Q2         0x00000800
+#       define RADEON_SE_VTX_FMT_ST3        0x00001000
+#       define RADEON_SE_VTX_FMT_Q3         0x00002000
+#       define RADEON_SE_VTX_FMT_Q0         0x00004000
+#       define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK  0x00038000
+#       define RADEON_SE_VTX_FMT_N0         0x00040000
+#       define RADEON_SE_VTX_FMT_XY1        0x08000000
+#       define RADEON_SE_VTX_FMT_Z1         0x10000000
+#       define RADEON_SE_VTX_FMT_W1         0x20000000
+#       define RADEON_SE_VTX_FMT_N1         0x40000000
+#       define RADEON_SE_VTX_FMT_Z          0x80000000
+
+#define RADEON_SE_VF_CNTL                             0x2084
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST         1
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST          2
+#       define RADEON_VF_PRIM_TYPE_LINE_STRIP         3
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST      4
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN       5
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP     6
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG      7
+#       define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST     8
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST_3       9
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST_3        10
+#       define RADEON_VF_PRIM_TYPE_SPIRIT_LIST        11
+#       define RADEON_VF_PRIM_TYPE_LINE_LOOP          12
+#       define RADEON_VF_PRIM_TYPE_QUAD_LIST          13
+#       define RADEON_VF_PRIM_TYPE_QUAD_STRIP         14
+#       define RADEON_VF_PRIM_TYPE_POLYGON            15
+#       define RADEON_VF_PRIM_WALK_STATE              (0<<4)
+#       define RADEON_VF_PRIM_WALK_INDEX              (1<<4)
+#       define RADEON_VF_PRIM_WALK_LIST               (2<<4)
+#       define RADEON_VF_PRIM_WALK_DATA               (3<<4)
+#       define RADEON_VF_COLOR_ORDER_RGBA             (1<<6)
+#       define RADEON_VF_RADEON_MODE                  (1<<8)
+#       define RADEON_VF_TCL_OUTPUT_CTL_ENA           (1<<9)
+#       define RADEON_VF_PROG_STREAM_ENA              (1<<10)
+#       define RADEON_VF_INDEX_SIZE_SHIFT             11
+#       define RADEON_VF_NUM_VERTICES_SHIFT           16
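+
+/*
+ * Illustrative sketch, not part of the original header: an SE_VF_CNTL word
+ * that would draw three vertices supplied inline as one triangle of a
+ * triangle list, with the vertex count in bits 16 and up (example-only,
+ * hence #if 0).
+ */
+#if 0
+uint32_t vf_cntl = RADEON_VF_PRIM_TYPE_TRIANGLE_LIST |
+    RADEON_VF_PRIM_WALK_DATA |
+    (3 << RADEON_VF_NUM_VERTICES_SHIFT);
+#endif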
+
+#define RADEON_SE_PORT_DATA0			0x2000
+
+#define R200_SE_VAP_CNTL			0x2080
+#       define R200_VAP_TCL_ENABLE		0x00000001
+#       define R200_VAP_SINGLE_BUF_STATE_ENABLE	0x00000010
+#       define R200_VAP_FORCE_W_TO_ONE		0x00010000
+#       define R200_VAP_D3D_TEX_DEFAULT		0x00020000
+#       define R200_VAP_VF_MAX_VTX_NUM__SHIFT	18
+#       define R200_VAP_VF_MAX_VTX_NUM		(9 << 18)
+#       define R200_VAP_DX_CLIP_SPACE_DEF	0x00400000
+#define R200_VF_MAX_VTX_INDX			0x210c
+#define R200_VF_MIN_VTX_INDX			0x2110
+#define R200_SE_VTE_CNTL			0x20b0
+#       define R200_VPORT_X_SCALE_ENA			0x00000001
+#       define R200_VPORT_X_OFFSET_ENA			0x00000002
+#       define R200_VPORT_Y_SCALE_ENA			0x00000004
+#       define R200_VPORT_Y_OFFSET_ENA			0x00000008
+#       define R200_VPORT_Z_SCALE_ENA			0x00000010
+#       define R200_VPORT_Z_OFFSET_ENA			0x00000020
+#       define R200_VTX_XY_FMT				0x00000100
+#       define R200_VTX_Z_FMT				0x00000200
+#       define R200_VTX_W0_FMT				0x00000400
+#       define R200_VTX_W0_NORMALIZE			0x00000800
+#       define R200_VTX_ST_DENORMALIZED		0x00001000
+#define R200_SE_VAP_CNTL_STATUS			0x2140
+#       define R200_VC_NO_SWAP			(0 << 0)
+#       define R200_VC_16BIT_SWAP		(1 << 0)
+#       define R200_VC_32BIT_SWAP		(2 << 0)
+#define R200_PP_TXFILTER_0			0x2c00
+#define R200_PP_TXFILTER_1			0x2c20
+#define R200_PP_TXFILTER_2			0x2c40
+#define R200_PP_TXFILTER_3			0x2c60
+#define R200_PP_TXFILTER_4			0x2c80
+#define R200_PP_TXFILTER_5			0x2ca0
+#       define R200_MAG_FILTER_NEAREST		(0  <<  0)
+#       define R200_MAG_FILTER_LINEAR		(1  <<  0)
+#       define R200_MAG_FILTER_MASK		(1  <<  0)
+#       define R200_MIN_FILTER_NEAREST		(0  <<  1)
+#       define R200_MIN_FILTER_LINEAR		(1  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST	(8  <<  1)
+#       define R200_MIN_FILTER_ANISO_LINEAR	(9  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 <<  1)
+#       define R200_MIN_FILTER_MASK		(15 <<  1)
+#       define R200_MAX_ANISO_1_TO_1		(0  <<  5)
+#       define R200_MAX_ANISO_2_TO_1		(1  <<  5)
+#       define R200_MAX_ANISO_4_TO_1		(2  <<  5)
+#       define R200_MAX_ANISO_8_TO_1		(3  <<  5)
+#       define R200_MAX_ANISO_16_TO_1		(4  <<  5)
+#       define R200_MAX_ANISO_MASK		(7  <<  5)
+#       define R200_MAX_MIP_LEVEL_MASK		(0x0f << 16)
+#       define R200_MAX_MIP_LEVEL_SHIFT		16
+#       define R200_YUV_TO_RGB			(1  << 20)
+#       define R200_YUV_TEMPERATURE_COOL	(0  << 21)
+#       define R200_YUV_TEMPERATURE_HOT		(1  << 21)
+#       define R200_YUV_TEMPERATURE_MASK	(1  << 21)
+#       define R200_WRAPEN_S			(1  << 22)
+#       define R200_CLAMP_S_WRAP		(0  << 23)
+#       define R200_CLAMP_S_MIRROR		(1  << 23)
+#       define R200_CLAMP_S_CLAMP_LAST		(2  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_LAST	(3  << 23)
+#       define R200_CLAMP_S_CLAMP_BORDER	(4  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_BORDER	(5  << 23)
+#       define R200_CLAMP_S_CLAMP_GL		(6  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_GL	(7  << 23)
+#       define R200_CLAMP_S_MASK		(7  << 23)
+#       define R200_WRAPEN_T			(1  << 26)
+#       define R200_CLAMP_T_WRAP		(0  << 27)
+#       define R200_CLAMP_T_MIRROR		(1  << 27)
+#       define R200_CLAMP_T_CLAMP_LAST		(2  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_LAST	(3  << 27)
+#       define R200_CLAMP_T_CLAMP_BORDER	(4  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_BORDER	(5  << 27)
+#       define R200_CLAMP_T_CLAMP_GL		(6  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_GL	(7  << 27)
+#       define R200_CLAMP_T_MASK		(7  << 27)
+#       define R200_KILL_LT_ZERO		(1  << 30)
+#       define R200_BORDER_MODE_OGL		(0  << 31)
+#       define R200_BORDER_MODE_D3D		(1U << 31)
+#define R200_PP_TXFORMAT_0			0x2c04
+#define R200_PP_TXFORMAT_1			0x2c24
+#define R200_PP_TXFORMAT_2			0x2c44
+#define R200_PP_TXFORMAT_3			0x2c64
+#define R200_PP_TXFORMAT_4			0x2c84
+#define R200_PP_TXFORMAT_5			0x2ca4
+#       define R200_TXFORMAT_I8			(0 << 0)
+#       define R200_TXFORMAT_AI88		(1 << 0)
+#       define R200_TXFORMAT_RGB332		(2 << 0)
+#       define R200_TXFORMAT_ARGB1555		(3 << 0)
+#       define R200_TXFORMAT_RGB565		(4 << 0)
+#       define R200_TXFORMAT_ARGB4444		(5 << 0)
+#       define R200_TXFORMAT_ARGB8888		(6 << 0)
+#       define R200_TXFORMAT_RGBA8888		(7 << 0)
+#       define R200_TXFORMAT_Y8			(8 << 0)
+#       define R200_TXFORMAT_AVYU4444		(9 << 0)
+#       define R200_TXFORMAT_VYUY422		(10 << 0)
+#       define R200_TXFORMAT_YVYU422		(11 << 0)
+#       define R200_TXFORMAT_DXT1		(12 << 0)
+#       define R200_TXFORMAT_DXT23		(14 << 0)
+#       define R200_TXFORMAT_DXT45		(15 << 0)
+#       define R200_TXFORMAT_DVDU88		(18 << 0)
+#       define R200_TXFORMAT_LDVDU655		(19 << 0)
+#       define R200_TXFORMAT_LDVDU8888		(20 << 0)
+#       define R200_TXFORMAT_GR1616		(21 << 0)
+#       define R200_TXFORMAT_ABGR8888		(22 << 0)
+#       define R200_TXFORMAT_BGR111110		(23 << 0)
+#       define R200_TXFORMAT_FORMAT_MASK	(31 <<	0)
+#       define R200_TXFORMAT_FORMAT_SHIFT	0
+#       define R200_TXFORMAT_ALPHA_IN_MAP	(1 << 6)
+#       define R200_TXFORMAT_NON_POWER2		(1 << 7)
+#       define R200_TXFORMAT_WIDTH_MASK		(15 <<	8)
+#       define R200_TXFORMAT_WIDTH_SHIFT	8
+#       define R200_TXFORMAT_HEIGHT_MASK	(15 << 12)
+#       define R200_TXFORMAT_HEIGHT_SHIFT	12
+#       define R200_TXFORMAT_F5_WIDTH_MASK	(15 << 16)	/* cube face 5 */
+#       define R200_TXFORMAT_F5_WIDTH_SHIFT	16
+#       define R200_TXFORMAT_F5_HEIGHT_MASK	(15 << 20)
+#       define R200_TXFORMAT_F5_HEIGHT_SHIFT	20
+#       define R200_TXFORMAT_ST_ROUTE_STQ0	(0 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ1	(1 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ2	(2 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ3	(3 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ4	(4 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ5	(5 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_MASK	(7 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_SHIFT	24
+#       define R200_TXFORMAT_LOOKUP_DISABLE	(1 << 27)
+#       define R200_TXFORMAT_ALPHA_MASK_ENABLE	(1 << 28)
+#       define R200_TXFORMAT_CHROMA_KEY_ENABLE	(1 << 29)
+#       define R200_TXFORMAT_CUBIC_MAP_ENABLE		(1 << 30)
+#define R200_PP_TXFORMAT_X_0                    0x2c08
+#define R200_PP_TXFORMAT_X_1                    0x2c28
+#define R200_PP_TXFORMAT_X_2                    0x2c48
+#define R200_PP_TXFORMAT_X_3                    0x2c68
+#define R200_PP_TXFORMAT_X_4                    0x2c88
+#define R200_PP_TXFORMAT_X_5                    0x2ca8
+
+#define R200_PP_TXSIZE_0			0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1			0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2			0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3			0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4			0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5			0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0                       0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1			0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2			0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3			0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4			0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5			0x2cb0 /* NPOT only */
+
+#define R200_PP_CUBIC_FACES_0			0x2c18
+#define R200_PP_CUBIC_FACES_1			0x2c38
+#define R200_PP_CUBIC_FACES_2			0x2c58
+#define R200_PP_CUBIC_FACES_3			0x2c78
+#define R200_PP_CUBIC_FACES_4			0x2c98
+#define R200_PP_CUBIC_FACES_5			0x2cb8
+
+#define R200_PP_TXOFFSET_0			0x2d00
+#       define R200_TXO_ENDIAN_NO_SWAP		(0 << 0)
+#       define R200_TXO_ENDIAN_BYTE_SWAP	(1 << 0)
+#       define R200_TXO_ENDIAN_WORD_SWAP	(2 << 0)
+#       define R200_TXO_ENDIAN_HALFDW_SWAP	(3 << 0)
+#       define R200_TXO_MACRO_LINEAR		(0 << 2)
+#       define R200_TXO_MACRO_TILE		(1 << 2)
+#       define R200_TXO_MICRO_LINEAR		(0 << 3)
+#       define R200_TXO_MICRO_TILE		(1 << 3)
+#       define R200_TXO_OFFSET_MASK		0xffffffe0
+#       define R200_TXO_OFFSET_SHIFT		5
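+
+/*
+ * Illustrative sketch, not part of the original header: TXOFFSET packs a
+ * 32-byte-aligned address in the bits covered by R200_TXO_OFFSET_MASK with
+ * endian/tiling flags in the low bits, so the (hypothetical) gpu_addr must
+ * be masked before the flags are OR-ed in (example-only, hence #if 0).
+ */
+#if 0
+uint32_t txoffset = (gpu_addr & R200_TXO_OFFSET_MASK) |
+    R200_TXO_MACRO_TILE | R200_TXO_ENDIAN_NO_SWAP;
+#endif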
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+
+#define R200_PP_TXOFFSET_1			0x2d18
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+
+#define R200_PP_TXOFFSET_2			0x2d30
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+
+#define R200_PP_TXOFFSET_3			0x2d48
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_TXOFFSET_4			0x2d60
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_TXOFFSET_5			0x2d78
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_PP_TFACTOR_0			0x2ee0
+#define R200_PP_TFACTOR_1			0x2ee4
+#define R200_PP_TFACTOR_2			0x2ee8
+#define R200_PP_TFACTOR_3			0x2eec
+#define R200_PP_TFACTOR_4			0x2ef0
+#define R200_PP_TFACTOR_5			0x2ef4
+
+#define R200_PP_TXCBLEND_0			0x2f00
+#       define R200_TXC_ARG_A_ZERO		(0)
+#       define R200_TXC_ARG_A_CURRENT_COLOR	(2)
+#       define R200_TXC_ARG_A_CURRENT_ALPHA	(3)
+#       define R200_TXC_ARG_A_DIFFUSE_COLOR	(4)
+#       define R200_TXC_ARG_A_DIFFUSE_ALPHA	(5)
+#       define R200_TXC_ARG_A_SPECULAR_COLOR	(6)
+#       define R200_TXC_ARG_A_SPECULAR_ALPHA	(7)
+#       define R200_TXC_ARG_A_TFACTOR_COLOR	(8)
+#       define R200_TXC_ARG_A_TFACTOR_ALPHA	(9)
+#       define R200_TXC_ARG_A_R0_COLOR		(10)
+#       define R200_TXC_ARG_A_R0_ALPHA		(11)
+#       define R200_TXC_ARG_A_R1_COLOR		(12)
+#       define R200_TXC_ARG_A_R1_ALPHA		(13)
+#       define R200_TXC_ARG_A_R2_COLOR		(14)
+#       define R200_TXC_ARG_A_R2_ALPHA		(15)
+#       define R200_TXC_ARG_A_R3_COLOR		(16)
+#       define R200_TXC_ARG_A_R3_ALPHA		(17)
+#       define R200_TXC_ARG_A_R4_COLOR		(18)
+#       define R200_TXC_ARG_A_R4_ALPHA		(19)
+#       define R200_TXC_ARG_A_R5_COLOR		(20)
+#       define R200_TXC_ARG_A_R5_ALPHA		(21)
+#       define R200_TXC_ARG_A_TFACTOR1_COLOR	(26)
+#       define R200_TXC_ARG_A_TFACTOR1_ALPHA	(27)
+#       define R200_TXC_ARG_A_MASK		(31 << 0)
+#       define R200_TXC_ARG_A_SHIFT		0
+#       define R200_TXC_ARG_B_ZERO		(0 << 5)
+#       define R200_TXC_ARG_B_CURRENT_COLOR	(2 << 5)
+#       define R200_TXC_ARG_B_CURRENT_ALPHA	(3 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_COLOR	(4 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_ALPHA	(5 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_COLOR	(6 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_ALPHA	(7 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_COLOR	(8 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_ALPHA	(9 << 5)
+#       define R200_TXC_ARG_B_R0_COLOR		(10 << 5)
+#       define R200_TXC_ARG_B_R0_ALPHA		(11 << 5)
+#       define R200_TXC_ARG_B_R1_COLOR		(12 << 5)
+#       define R200_TXC_ARG_B_R1_ALPHA		(13 << 5)
+#       define R200_TXC_ARG_B_R2_COLOR		(14 << 5)
+#       define R200_TXC_ARG_B_R2_ALPHA		(15 << 5)
+#       define R200_TXC_ARG_B_R3_COLOR		(16 << 5)
+#       define R200_TXC_ARG_B_R3_ALPHA		(17 << 5)
+#       define R200_TXC_ARG_B_R4_COLOR		(18 << 5)
+#       define R200_TXC_ARG_B_R4_ALPHA		(19 << 5)
+#       define R200_TXC_ARG_B_R5_COLOR		(20 << 5)
+#       define R200_TXC_ARG_B_R5_ALPHA		(21 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_COLOR	(26 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_ALPHA	(27 << 5)
+#       define R200_TXC_ARG_B_MASK		(31 << 5)
+#       define R200_TXC_ARG_B_SHIFT		5
+#       define R200_TXC_ARG_C_ZERO		(0 << 10)
+#       define R200_TXC_ARG_C_CURRENT_COLOR	(2 << 10)
+#       define R200_TXC_ARG_C_CURRENT_ALPHA	(3 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_COLOR	(4 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_ALPHA	(5 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_COLOR	(6 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_ALPHA	(7 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_COLOR	(8 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_ALPHA	(9 << 10)
+#       define R200_TXC_ARG_C_R0_COLOR		(10 << 10)
+#       define R200_TXC_ARG_C_R0_ALPHA		(11 << 10)
+#       define R200_TXC_ARG_C_R1_COLOR		(12 << 10)
+#       define R200_TXC_ARG_C_R1_ALPHA		(13 << 10)
+#       define R200_TXC_ARG_C_R2_COLOR		(14 << 10)
+#       define R200_TXC_ARG_C_R2_ALPHA		(15 << 10)
+#       define R200_TXC_ARG_C_R3_COLOR		(16 << 10)
+#       define R200_TXC_ARG_C_R3_ALPHA		(17 << 10)
+#       define R200_TXC_ARG_C_R4_COLOR		(18 << 10)
+#       define R200_TXC_ARG_C_R4_ALPHA		(19 << 10)
+#       define R200_TXC_ARG_C_R5_COLOR		(20 << 10)
+#       define R200_TXC_ARG_C_R5_ALPHA		(21 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_COLOR	(26 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_ALPHA	(27 << 10)
+#       define R200_TXC_ARG_C_MASK		(31 << 10)
+#       define R200_TXC_ARG_C_SHIFT		10
+#       define R200_TXC_COMP_ARG_A		(1 << 16)
+#       define R200_TXC_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXC_BIAS_ARG_A		(1 << 17)
+#       define R200_TXC_SCALE_ARG_A		(1 << 18)
+#       define R200_TXC_NEG_ARG_A		(1 << 19)
+#       define R200_TXC_COMP_ARG_B		(1 << 20)
+#       define R200_TXC_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXC_BIAS_ARG_B		(1 << 21)
+#       define R200_TXC_SCALE_ARG_B		(1 << 22)
+#       define R200_TXC_NEG_ARG_B		(1 << 23)
+#       define R200_TXC_COMP_ARG_C		(1 << 24)
+#       define R200_TXC_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXC_BIAS_ARG_C		(1 << 25)
+#       define R200_TXC_SCALE_ARG_C		(1 << 26)
+#       define R200_TXC_NEG_ARG_C		(1 << 27)
+#       define R200_TXC_OP_MADD			(0 << 28)
+#       define R200_TXC_OP_CND0			(2 << 28)
+#       define R200_TXC_OP_LERP			(3 << 28)
+#       define R200_TXC_OP_DOT3			(4 << 28)
+#       define R200_TXC_OP_DOT4			(5 << 28)
+#       define R200_TXC_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXC_OP_DOT2_ADD		(7 << 28)
+#       define R200_TXC_OP_MASK			(7 << 28)
+#define R200_PP_TXCBLEND2_0		0x2f04
+#       define R200_TXC_TFACTOR_SEL_SHIFT	0
+#       define R200_TXC_TFACTOR_SEL_MASK	0x7
+#       define R200_TXC_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXC_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXC_SCALE_SHIFT		8
+#       define R200_TXC_SCALE_MASK		(7 << 8)
+#       define R200_TXC_SCALE_1X		(0 << 8)
+#       define R200_TXC_SCALE_2X		(1 << 8)
+#       define R200_TXC_SCALE_4X		(2 << 8)
+#       define R200_TXC_SCALE_8X		(3 << 8)
+#       define R200_TXC_SCALE_INV2		(5 << 8)
+#       define R200_TXC_SCALE_INV4		(6 << 8)
+#       define R200_TXC_SCALE_INV8		(7 << 8)
+#       define R200_TXC_CLAMP_SHIFT		12
+#       define R200_TXC_CLAMP_MASK		(3 << 12)
+#       define R200_TXC_CLAMP_WRAP		(0 << 12)
+#       define R200_TXC_CLAMP_0_1		(1 << 12)
+#       define R200_TXC_CLAMP_8_8		(2 << 12)
+#       define R200_TXC_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXC_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXC_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXC_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXC_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXC_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXC_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXC_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXC_OUTPUT_MASK_MASK	(7 << 20)
+#       define R200_TXC_OUTPUT_MASK_RGB		(0 << 20)
+#       define R200_TXC_OUTPUT_MASK_RG		(1 << 20)
+#       define R200_TXC_OUTPUT_MASK_RB		(2 << 20)
+#       define R200_TXC_OUTPUT_MASK_R		(3 << 20)
+#       define R200_TXC_OUTPUT_MASK_GB		(4 << 20)
+#       define R200_TXC_OUTPUT_MASK_G		(5 << 20)
+#       define R200_TXC_OUTPUT_MASK_B		(6 << 20)
+#       define R200_TXC_OUTPUT_MASK_NONE	(7 << 20)
+#       define R200_TXC_REPL_NORMAL		0
+#       define R200_TXC_REPL_RED		1
+#       define R200_TXC_REPL_GREEN		2
+#       define R200_TXC_REPL_BLUE		3
+#       define R200_TXC_REPL_ARG_A_SHIFT	26
+#       define R200_TXC_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXC_REPL_ARG_B_SHIFT	28
+#       define R200_TXC_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXC_REPL_ARG_C_SHIFT	30
+#       define R200_TXC_REPL_ARG_C_MASK		(3U << 30)
+#define R200_PP_TXABLEND_0			0x2f08
+#       define R200_TXA_ARG_A_ZERO		(0)
+#       define R200_TXA_ARG_A_CURRENT_ALPHA	(2) /* guess */
+#       define R200_TXA_ARG_A_CURRENT_BLUE	(3) /* guess */
+#       define R200_TXA_ARG_A_DIFFUSE_ALPHA	(4)
+#       define R200_TXA_ARG_A_DIFFUSE_BLUE	(5)
+#       define R200_TXA_ARG_A_SPECULAR_ALPHA	(6)
+#       define R200_TXA_ARG_A_SPECULAR_BLUE	(7)
+#       define R200_TXA_ARG_A_TFACTOR_ALPHA	(8)
+#       define R200_TXA_ARG_A_TFACTOR_BLUE	(9)
+#       define R200_TXA_ARG_A_R0_ALPHA		(10)
+#       define R200_TXA_ARG_A_R0_BLUE		(11)
+#       define R200_TXA_ARG_A_R1_ALPHA		(12)
+#       define R200_TXA_ARG_A_R1_BLUE		(13)
+#       define R200_TXA_ARG_A_R2_ALPHA		(14)
+#       define R200_TXA_ARG_A_R2_BLUE		(15)
+#       define R200_TXA_ARG_A_R3_ALPHA		(16)
+#       define R200_TXA_ARG_A_R3_BLUE		(17)
+#       define R200_TXA_ARG_A_R4_ALPHA		(18)
+#       define R200_TXA_ARG_A_R4_BLUE		(19)
+#       define R200_TXA_ARG_A_R5_ALPHA		(20)
+#       define R200_TXA_ARG_A_R5_BLUE		(21)
+#       define R200_TXA_ARG_A_TFACTOR1_ALPHA	(26)
+#       define R200_TXA_ARG_A_TFACTOR1_BLUE	(27)
+#       define R200_TXA_ARG_A_MASK		(31 << 0)
+#       define R200_TXA_ARG_A_SHIFT		0
+#       define R200_TXA_ARG_B_ZERO		(0 << 5)
+#       define R200_TXA_ARG_B_CURRENT_ALPHA	(2 << 5) /* guess */
+#       define R200_TXA_ARG_B_CURRENT_BLUE	(3 << 5) /* guess */
+#       define R200_TXA_ARG_B_DIFFUSE_ALPHA	(4 << 5)
+#       define R200_TXA_ARG_B_DIFFUSE_BLUE	(5 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_ALPHA	(6 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_BLUE	(7 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_ALPHA	(8 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_BLUE	(9 << 5)
+#       define R200_TXA_ARG_B_R0_ALPHA		(10 << 5)
+#       define R200_TXA_ARG_B_R0_BLUE		(11 << 5)
+#       define R200_TXA_ARG_B_R1_ALPHA		(12 << 5)
+#       define R200_TXA_ARG_B_R1_BLUE		(13 << 5)
+#       define R200_TXA_ARG_B_R2_ALPHA		(14 << 5)
+#       define R200_TXA_ARG_B_R2_BLUE		(15 << 5)
+#       define R200_TXA_ARG_B_R3_ALPHA		(16 << 5)
+#       define R200_TXA_ARG_B_R3_BLUE		(17 << 5)
+#       define R200_TXA_ARG_B_R4_ALPHA		(18 << 5)
+#       define R200_TXA_ARG_B_R4_BLUE		(19 << 5)
+#       define R200_TXA_ARG_B_R5_ALPHA		(20 << 5)
+#       define R200_TXA_ARG_B_R5_BLUE		(21 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_ALPHA	(26 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_BLUE	(27 << 5)
+#       define R200_TXA_ARG_B_MASK		(31 << 5)
+#       define R200_TXA_ARG_B_SHIFT			5
+#       define R200_TXA_ARG_C_ZERO		(0 << 10)
+#       define R200_TXA_ARG_C_CURRENT_ALPHA	(2 << 10) /* guess */
+#       define R200_TXA_ARG_C_CURRENT_BLUE	(3 << 10) /* guess */
+#       define R200_TXA_ARG_C_DIFFUSE_ALPHA	(4 << 10)
+#       define R200_TXA_ARG_C_DIFFUSE_BLUE	(5 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_ALPHA	(6 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_BLUE	(7 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_ALPHA	(8 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_BLUE	(9 << 10)
+#       define R200_TXA_ARG_C_R0_ALPHA		(10 << 10)
+#       define R200_TXA_ARG_C_R0_BLUE		(11 << 10)
+#       define R200_TXA_ARG_C_R1_ALPHA		(12 << 10)
+#       define R200_TXA_ARG_C_R1_BLUE		(13 << 10)
+#       define R200_TXA_ARG_C_R2_ALPHA		(14 << 10)
+#       define R200_TXA_ARG_C_R2_BLUE		(15 << 10)
+#       define R200_TXA_ARG_C_R3_ALPHA		(16 << 10)
+#       define R200_TXA_ARG_C_R3_BLUE		(17 << 10)
+#       define R200_TXA_ARG_C_R4_ALPHA		(18 << 10)
+#       define R200_TXA_ARG_C_R4_BLUE		(19 << 10)
+#       define R200_TXA_ARG_C_R5_ALPHA		(20 << 10)
+#       define R200_TXA_ARG_C_R5_BLUE		(21 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_ALPHA	(26 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_BLUE	(27 << 10)
+#       define R200_TXA_ARG_C_MASK		(31 << 10)
+#       define R200_TXA_ARG_C_SHIFT		10
+#       define R200_TXA_COMP_ARG_A		(1 << 16)
+#       define R200_TXA_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXA_BIAS_ARG_A		(1 << 17)
+#       define R200_TXA_SCALE_ARG_A		(1 << 18)
+#       define R200_TXA_NEG_ARG_A		(1 << 19)
+#       define R200_TXA_COMP_ARG_B		(1 << 20)
+#       define R200_TXA_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXA_BIAS_ARG_B		(1 << 21)
+#       define R200_TXA_SCALE_ARG_B		(1 << 22)
+#       define R200_TXA_NEG_ARG_B		(1 << 23)
+#       define R200_TXA_COMP_ARG_C		(1 << 24)
+#       define R200_TXA_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXA_BIAS_ARG_C		(1 << 25)
+#       define R200_TXA_SCALE_ARG_C		(1 << 26)
+#       define R200_TXA_NEG_ARG_C		(1 << 27)
+#       define R200_TXA_OP_MADD			(0 << 28)
+#       define R200_TXA_OP_CND0			(2 << 28)
+#       define R200_TXA_OP_LERP			(3 << 28)
+#       define R200_TXA_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXA_OP_MASK			(7 << 28)
+#define R200_PP_TXABLEND2_0			0x2f0c
+#       define R200_TXA_TFACTOR_SEL_SHIFT	0
+#       define R200_TXA_TFACTOR_SEL_MASK	0x7
+#       define R200_TXA_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXA_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXA_SCALE_SHIFT		8
+#       define R200_TXA_SCALE_MASK		(7 << 8)
+#       define R200_TXA_SCALE_1X		(0 << 8)
+#       define R200_TXA_SCALE_2X		(1 << 8)
+#       define R200_TXA_SCALE_4X		(2 << 8)
+#       define R200_TXA_SCALE_8X		(3 << 8)
+#       define R200_TXA_SCALE_INV2		(5 << 8)
+#       define R200_TXA_SCALE_INV4		(6 << 8)
+#       define R200_TXA_SCALE_INV8		(7 << 8)
+#       define R200_TXA_CLAMP_SHIFT		12
+#       define R200_TXA_CLAMP_MASK		(3 << 12)
+#       define R200_TXA_CLAMP_WRAP		(0 << 12)
+#       define R200_TXA_CLAMP_0_1		(1 << 12)
+#       define R200_TXA_CLAMP_8_8		(2 << 12)
+#       define R200_TXA_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXA_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXA_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXA_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXA_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXA_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXA_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXA_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXA_DOT_ALPHA		(1 << 20)
+#       define R200_TXA_REPL_NORMAL		0
+#       define R200_TXA_REPL_RED		1
+#       define R200_TXA_REPL_GREEN		2
+#       define R200_TXA_REPL_ARG_A_SHIFT	26
+#       define R200_TXA_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXA_REPL_ARG_B_SHIFT	28
+#       define R200_TXA_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXA_REPL_ARG_C_SHIFT	30
+#       define R200_TXA_REPL_ARG_C_MASK		(3U << 30)
+
+#define R200_SE_VTX_FMT_0			0x2088
+#       define R200_VTX_XY			0 /* always have xy */
+#       define R200_VTX_Z0			(1<<0)
+#       define R200_VTX_W0			(1<<1)
+#       define R200_VTX_WEIGHT_COUNT_SHIFT	(2)
+#       define R200_VTX_PV_MATRIX_SEL		(1<<5)
+#       define R200_VTX_N0			(1<<6)
+#       define R200_VTX_POINT_SIZE		(1<<7)
+#       define R200_VTX_DISCRETE_FOG		(1<<8)
+#       define R200_VTX_SHININESS_0		(1<<9)
+#       define R200_VTX_SHININESS_1		(1<<10)
+#       define   R200_VTX_COLOR_NOT_PRESENT	0
+#       define   R200_VTX_PK_RGBA		1
+#       define   R200_VTX_FP_RGB		2
+#       define   R200_VTX_FP_RGBA		3
+#       define   R200_VTX_COLOR_MASK		3
+#       define R200_VTX_COLOR_0_SHIFT		11
+#       define R200_VTX_COLOR_1_SHIFT		13
+#       define R200_VTX_COLOR_2_SHIFT		15
+#       define R200_VTX_COLOR_3_SHIFT		17
+#       define R200_VTX_COLOR_4_SHIFT		19
+#       define R200_VTX_COLOR_5_SHIFT		21
+#       define R200_VTX_COLOR_6_SHIFT		23
+#       define R200_VTX_COLOR_7_SHIFT		25
+#       define R200_VTX_XY1			(1<<28)
+#       define R200_VTX_Z1			(1<<29)
+#       define R200_VTX_W1			(1<<30)
+#       define R200_VTX_N1			(1U<<31)
+#define R200_SE_VTX_FMT_1			0x208c
+#       define R200_VTX_TEX0_COMP_CNT_SHIFT	0
+#       define R200_VTX_TEX1_COMP_CNT_SHIFT	3
+#       define R200_VTX_TEX2_COMP_CNT_SHIFT	6
+#       define R200_VTX_TEX3_COMP_CNT_SHIFT	9
+#       define R200_VTX_TEX4_COMP_CNT_SHIFT	12
+#       define R200_VTX_TEX5_COMP_CNT_SHIFT	15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0		0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1		0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL		0x2250
+#       define R200_OUTPUT_XYZW			(1<<0)
+#       define R200_OUTPUT_COLOR_0		(1<<8)
+#       define R200_OUTPUT_COLOR_1		(1<<9)
+#       define R200_OUTPUT_TEX_0		(1<<16)
+#       define R200_OUTPUT_TEX_1		(1<<17)
+#       define R200_OUTPUT_TEX_2		(1<<18)
+#       define R200_OUTPUT_TEX_3		(1<<19)
+#       define R200_OUTPUT_TEX_4		(1<<20)
+#       define R200_OUTPUT_TEX_5		(1<<21)
+#       define R200_OUTPUT_TEX_MASK		(0x3f<<16)
+#       define R200_OUTPUT_DISCRETE_FOG		(1<<24)
+#       define R200_OUTPUT_PT_SIZE		(1<<25)
+#       define R200_FORCE_INORDER_PROC		(1U<<31)
+#define R200_PP_CNTL_X				0x2cc4
+#define R200_PP_TXMULTI_CTL_0			0x2c1c
+#define R200_PP_TXMULTI_CTL_1			0x2c3c
+#define R200_PP_TXMULTI_CTL_2			0x2c5c
+#define R200_PP_TXMULTI_CTL_3			0x2c7c
+#define R200_PP_TXMULTI_CTL_4			0x2c9c
+#define R200_PP_TXMULTI_CTL_5			0x2cbc
+#define R200_SE_VTX_STATE_CNTL			0x2180
+#       define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+				/* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR               0x07d4
+#define RADEON_CP_ME_RAM_RADDR              0x07d8
+#define RADEON_CP_ME_RAM_DATAH              0x07dc
+#define RADEON_CP_ME_RAM_DATAL              0x07e0
+
+#define RADEON_CP_RB_BASE                   0x0700
+#define RADEON_CP_RB_CNTL                   0x0704
+#	define RADEON_RB_BUFSZ_SHIFT		0
+#	define RADEON_RB_BUFSZ_MASK		(0x3f << 0)
+#	define RADEON_RB_BLKSZ_SHIFT		8
+#	define RADEON_RB_BLKSZ_MASK		(0x3f << 8)
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_MAX_FETCH_SHIFT		18
+#	define RADEON_MAX_FETCH_MASK		(0x3 << 18)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#	define RADEON_RB_RPTR_WR_ENA		(1U << 31)
+#define RADEON_CP_RB_RPTR_ADDR              0x070c
+#define RADEON_CP_RB_RPTR                   0x0710
+#define RADEON_CP_RB_WPTR                   0x0714
+#define RADEON_CP_RB_RPTR_WR                0x071c
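+
+/*
+ * Illustrative sketch, not part of the original header: the legacy radeon CP
+ * code programs RB_BUFSZ with log2 of the ring size in quad-words; assuming
+ * that encoding, a power-of-two ring size in bytes maps to the field like
+ * this (example-only, hence #if 0).
+ */
+#if 0
+static __inline uint32_t
+radeon_rb_bufsz(uint32_t ring_size)
+{
+	uint32_t l2qw = 0;
+
+	while ((8U << l2qw) < ring_size)	/* log2(ring_size / 8) */
+		l2qw++;
+	return ((l2qw << RADEON_RB_BUFSZ_SHIFT) & RADEON_RB_BUFSZ_MASK);
+}
+#endif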
+
+#define RADEON_SCRATCH_UMSK		    0x0770
+#define RADEON_SCRATCH_ADDR		    0x0774
+
+#define R600_CP_RB_BASE                     0xc100
+#define R600_CP_RB_CNTL                     0xc104
+#       define R600_RB_BUFSZ(x)             ((x) << 0)
+#       define R600_RB_BLKSZ(x)             ((x) << 8)
+#       define R600_RB_NO_UPDATE            (1 << 27)
+#       define R600_RB_RPTR_WR_ENA          (1U << 31)
+#define R600_CP_RB_RPTR_WR                  0xc108
+#define R600_CP_RB_RPTR_ADDR                0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI             0xc110
+#define R600_CP_RB_WPTR                     0xc114
+#define R600_CP_RB_WPTR_ADDR                0xc118
+#define R600_CP_RB_WPTR_ADDR_HI             0xc11c
+#define R600_CP_RB_RPTR                     0x8700
+#define R600_CP_RB_WPTR_DELAY               0x8704
+
+#define RADEON_CP_IB_BASE                   0x0738
+#define RADEON_CP_IB_BUFSZ                  0x073c
+
+#define RADEON_CP_CSQ_CNTL                  0x0740
+#       define RADEON_CSQ_CNT_PRIMARY_MASK     (0xff << 0)
+#       define RADEON_CSQ_PRIDIS_INDDIS        (0    << 28)
+#       define RADEON_CSQ_PRIPIO_INDDIS        (1    << 28)
+#       define RADEON_CSQ_PRIBM_INDDIS         (2    << 28)
+#       define RADEON_CSQ_PRIPIO_INDBM         (3    << 28)
+#       define RADEON_CSQ_PRIBM_INDBM          (4    << 28)
+#       define RADEON_CSQ_PRIPIO_INDPIO        (15U  << 28)
+
+#define R300_CP_RESYNC_ADDR                 0x778
+#define R300_CP_RESYNC_DATA                 0x77c
+
+#define RADEON_CP_CSQ_STAT                  0x07f8
+#       define RADEON_CSQ_RPTR_PRIMARY_MASK    (0xff <<  0)
+#       define RADEON_CSQ_WPTR_PRIMARY_MASK    (0xff <<  8)
+#       define RADEON_CSQ_RPTR_INDIRECT_MASK   (0xff << 16)
+#       define RADEON_CSQ_WPTR_INDIRECT_MASK   (0xff << 24)
+#define RADEON_CP_CSQ2_STAT                  0x07fc
+#define RADEON_CP_CSQ_ADDR                  0x07f0
+#define RADEON_CP_CSQ_DATA                  0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY          0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT         0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY             0x0718
+#       define RADEON_PRE_WRITE_TIMER_SHIFT    0
+#       define RADEON_PRE_WRITE_LIMIT_SHIFT    23
+#define RADEON_CP_CSQ_MODE		0x0744
+#	define RADEON_INDIRECT2_START_SHIFT	0
+#	define RADEON_INDIRECT2_START_MASK	(0x7f << 0)
+#	define RADEON_INDIRECT1_START_SHIFT	8
+#	define RADEON_INDIRECT1_START_MASK	(0x7f << 8)
+
+#define RADEON_AIC_CNTL                     0x01d0
+#       define RADEON_PCIGART_TRANSLATE_EN     (1 << 0)
+#       define RADEON_DIS_OUT_OF_PCI_GART_ACCESS     (1 << 1)
+#	define RS400_MSI_REARM	                (1 << 3) /* rs400/rs480 */
+#define RADEON_AIC_LO_ADDR                  0x01dc
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_HI_ADDR		0x01e0
+
+
+
+				/* Constants */
+/* #define RADEON_LAST_FRAME_REG               RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG               RADEON_GUI_SCRATCH_REG2 */
+
+
+
+				/* CP packet types */
+#define RADEON_CP_PACKET0                           0x00000000
+#define RADEON_CP_PACKET1                           0x40000000
+#define RADEON_CP_PACKET2                           0x80000000
+#define RADEON_CP_PACKET3                           0xC0000000
+#       define RADEON_CP_PACKET_MASK                0xC0000000
+#       define RADEON_CP_PACKET_COUNT_MASK          0x3fff0000
+#       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
+#       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
+#       define R300_CP_PACKET0_REG_MASK             0x00001fff
+#       define R600_CP_PACKET0_REG_MASK             0x0000ffff
+#       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
+#       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR                0x00008000
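+
+/*
+ * Illustrative sketch, not part of the original header: a type-0 packet
+ * header carries the dword register index in the low bits and a count field
+ * (data dwords to write minus one) in bits 16..29, in the style of the
+ * CP_PACKET0() macro used by the legacy radeon DRM code (example-only,
+ * hence #if 0).
+ */
+#if 0
+#define EXAMPLE_CP_PACKET0(reg, n)					\
+	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#endif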
+
+#define RADEON_CP_PACKET3_NOP                       0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR                 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN              0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS              0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM     0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE            0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE             0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF              0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD              0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX              0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE              0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2              0xc0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR            0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT                0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT               0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT            0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT         0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE             0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES        0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI          0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI         0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT         0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY                        0x00000000
+#define RADEON_CP_VC_FRMT_W0                        0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR                   0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA                   0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR                   0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC                    0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG                     0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC                    0x00000040
+#define RADEON_CP_VC_FRMT_ST0                       0x00000080
+#define RADEON_CP_VC_FRMT_ST1                       0x00000100
+#define RADEON_CP_VC_FRMT_Q1                        0x00000200
+#define RADEON_CP_VC_FRMT_ST2                       0x00000400
+#define RADEON_CP_VC_FRMT_Q2                        0x00000800
+#define RADEON_CP_VC_FRMT_ST3                       0x00001000
+#define RADEON_CP_VC_FRMT_Q3                        0x00002000
+#define RADEON_CP_VC_FRMT_Q0                        0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK      0x00038000
+#define RADEON_CP_VC_FRMT_N0                        0x00040000
+#define RADEON_CP_VC_FRMT_XY1                       0x08000000
+#define RADEON_CP_VC_FRMT_Z1                        0x10000000
+#define RADEON_CP_VC_FRMT_W1                        0x20000000
+#define RADEON_CP_VC_FRMT_N1                        0x40000000
+#define RADEON_CP_VC_FRMT_Z                         0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE            0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT           0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE            0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP      0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST        0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN         0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP       0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2      0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST       0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST  0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND             0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST            0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING            0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA          0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA          0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE               0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE   0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE       0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE               0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE                0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT                 16
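+
+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * header): the CP_VC_CNTL bits above are OR'ed together into a single
+ * vertex-control dword, with the vertex count placed in the high bits
+ * via RADEON_CP_VC_CNTL_NUM_SHIFT.  "nr_vertices" is a hypothetical name.
+ */
+#if 0	/* example only */
+	uint32_t vc_cntl = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST |
+			   RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
+			   RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
+			   RADEON_CP_VC_CNTL_MAOS_ENABLE |
+			   RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
+			   (nr_vertices << RADEON_CP_VC_CNTL_NUM_SHIFT);
+#endif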
+
+#define RADEON_VS_MATRIX_0_ADDR                   0
+#define RADEON_VS_MATRIX_1_ADDR                   4
+#define RADEON_VS_MATRIX_2_ADDR                   8
+#define RADEON_VS_MATRIX_3_ADDR                  12
+#define RADEON_VS_MATRIX_4_ADDR                  16
+#define RADEON_VS_MATRIX_5_ADDR                  20
+#define RADEON_VS_MATRIX_6_ADDR                  24
+#define RADEON_VS_MATRIX_7_ADDR                  28
+#define RADEON_VS_MATRIX_8_ADDR                  32
+#define RADEON_VS_MATRIX_9_ADDR                  36
+#define RADEON_VS_MATRIX_10_ADDR                 40
+#define RADEON_VS_MATRIX_11_ADDR                 44
+#define RADEON_VS_MATRIX_12_ADDR                 48
+#define RADEON_VS_MATRIX_13_ADDR                 52
+#define RADEON_VS_MATRIX_14_ADDR                 56
+#define RADEON_VS_MATRIX_15_ADDR                 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR             64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR             72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR            80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR              88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR             96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR        104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR          112
+#define RADEON_VS_UCP_ADDR                      116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR           122
+#define RADEON_VS_FOG_PARAM_ADDR                123
+#define RADEON_VS_EYE_VECTOR_ADDR               124
+
+#define RADEON_SS_LIGHT_DCD_ADDR                  0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR        8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR         16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR     24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR        32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR       48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR    49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR       50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR    51
+#define RADEON_SS_SHININESS                      60
+
+#define RADEON_TV_MASTER_CNTL                    0x0800
+#       define RADEON_TV_ASYNC_RST               (1 <<  0)
+#       define RADEON_CRT_ASYNC_RST              (1 <<  1)
+#       define RADEON_RESTART_PHASE_FIX          (1 <<  3)
+#	define RADEON_TV_FIFO_ASYNC_RST		 (1 <<  4)
+#	define RADEON_VIN_ASYNC_RST		 (1 <<  5)
+#	define RADEON_AUD_ASYNC_RST		 (1 <<  6)
+#	define RADEON_DVS_ASYNC_RST		 (1 <<  7)
+#       define RADEON_CRT_FIFO_CE_EN             (1 <<  9)
+#       define RADEON_TV_FIFO_CE_EN              (1 << 10)
+#       define RADEON_RE_SYNC_NOW_SEL_MASK       (3 << 14)
+#       define RADEON_TVCLK_ALWAYS_ONb           (1 << 30)
+#	define RADEON_TV_ON			 (1U << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL               0x0888
+#       define RADEON_Y_RED_EN                   (1 << 0)
+#       define RADEON_C_GRN_EN                   (1 << 1)
+#       define RADEON_CMP_BLU_EN                 (1 << 2)
+#       define RADEON_DAC_DITHER_EN              (1 << 3)
+#       define RADEON_RED_MX_FORCE_DAC_DATA      (6 << 4)
+#       define RADEON_GRN_MX_FORCE_DAC_DATA      (6 << 8)
+#       define RADEON_BLU_MX_FORCE_DAC_DATA      (6 << 12)
+#       define RADEON_TV_FORCE_DAC_DATA_SHIFT    16
+#define RADEON_TV_RGB_CNTL                           0x0804
+#       define RADEON_SWITCH_TO_BLUE		  (1 <<  4)
+#       define RADEON_RGB_DITHER_EN		  (1 <<  5)
+#       define RADEON_RGB_SRC_SEL_MASK		  (3 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC1		  (0 <<  8)
+#       define RADEON_RGB_SRC_SEL_RMX		  (1 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC2		  (2 <<  8)
+#       define RADEON_RGB_CONVERT_BY_PASS	  (1 << 10)
+#       define RADEON_UVRAM_READ_MARGIN_SHIFT	  16
+#       define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT	  20
+#       define RADEON_RGB_ATTEN_SEL(x)            ((x) << 24)
+#       define RADEON_TVOUT_SCALE_EN              (1 << 26)
+#       define RADEON_RGB_ATTEN_VAL(x)            ((x) << 28)
+#define RADEON_TV_SYNC_CNTL                          0x0808
+#       define RADEON_SYNC_OE                     (1 <<  0)
+#       define RADEON_SYNC_OUT                    (1 <<  1)
+#       define RADEON_SYNC_IN                     (1 <<  2)
+#       define RADEON_SYNC_PUB                    (1 <<  3)
+#       define RADEON_SYNC_PD                     (1 <<  4)
+#       define RADEON_TV_SYNC_IO_DRIVE            (1 <<  5)
+#define RADEON_TV_HTOTAL                             0x080c
+#define RADEON_TV_HDISP                              0x0810
+#define RADEON_TV_HSTART                             0x0818
+#define RADEON_TV_HCOUNT                             0x081C
+#define RADEON_TV_VTOTAL                             0x0820
+#define RADEON_TV_VDISP                              0x0824
+#define RADEON_TV_VCOUNT                             0x0828
+#define RADEON_TV_FTOTAL                             0x082c
+#define RADEON_TV_FCOUNT                             0x0830
+#define RADEON_TV_FRESTART                           0x0834
+#define RADEON_TV_HRESTART                           0x0838
+#define RADEON_TV_VRESTART                           0x083c
+#define RADEON_TV_HOST_READ_DATA                     0x0840
+#define RADEON_TV_HOST_WRITE_DATA                    0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL                    0x0848
+#	define RADEON_HOST_FIFO_RD		 (1 << 12)
+#	define RADEON_HOST_FIFO_RD_ACK		 (1 << 13)
+#	define RADEON_HOST_FIFO_WT		 (1 << 14)
+#	define RADEON_HOST_FIFO_WT_ACK		 (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1                      0x084c
+#       define RADEON_UV_INC_MASK                0xffff
+#       define RADEON_UV_INC_SHIFT               0
+#       define RADEON_Y_W_EN			 (1 << 24)
+#       define RADEON_RESTART_FIELD              (1 << 29) /* restart on field 0 */
+#       define RADEON_Y_DEL_W_SIG_SHIFT          26
+#define RADEON_TV_TIMING_CNTL                        0x0850
+#       define RADEON_H_INC_MASK                 0xfff
+#       define RADEON_H_INC_SHIFT                0
+#       define RADEON_REQ_Y_FIRST                (1 << 19)
+#       define RADEON_FORCE_BURST_ALWAYS         (1 << 21)
+#       define RADEON_UV_POST_SCALE_BYPASS       (1 << 23)
+#       define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2                      0x0854
+#       define RADEON_DITHER_MODE                (1 <<  0)
+#       define RADEON_Y_OUTPUT_DITHER_EN         (1 <<  1)
+#       define RADEON_UV_OUTPUT_DITHER_EN        (1 <<  2)
+#       define RADEON_UV_TO_BUF_DITHER_EN        (1 <<  3)
+#define RADEON_TV_Y_FALL_CNTL                        0x0858
+#       define RADEON_Y_FALL_PING_PONG           (1 << 16)
+#       define RADEON_Y_COEF_EN                  (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL                        0x085c
+#       define RADEON_Y_RISE_PING_PONG           (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL                   0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL               0x0864
+#	define RADEON_YUPSAMP_EN		 (1 <<  0)
+#	define RADEON_UVUPSAMP_EN		 (1 <<  2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS                0x0868
+#       define RADEON_Y_GAIN_LIMIT_SHIFT         0
+#       define RADEON_UV_GAIN_LIMIT_SHIFT        16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS               0x086c
+#       define RADEON_Y_GAIN_SHIFT               0
+#       define RADEON_UV_GAIN_SHIFT              16
+#define RADEON_TV_MODULATOR_CNTL1                    0x0870
+#	define RADEON_YFLT_EN			 (1 <<  2)
+#	define RADEON_UVFLT_EN			 (1 <<  3)
+#       define RADEON_ALT_PHASE_EN               (1 <<  6)
+#       define RADEON_SYNC_TIP_LEVEL             (1 <<  7)
+#       define RADEON_BLANK_LEVEL_SHIFT          8
+#       define RADEON_SET_UP_LEVEL_SHIFT         16
+#	define RADEON_SLEW_RATE_LIMIT		 (1 << 23)
+#       define RADEON_CY_FILT_BLEND_SHIFT        28
+#define RADEON_TV_MODULATOR_CNTL2                    0x0874
+#       define RADEON_TV_U_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_SHIFT    16
+#define RADEON_TV_CRC_CNTL                           0x0890
+#define RADEON_TV_UV_ADR                             0x08ac
+#	define RADEON_MAX_UV_ADR_MASK		 0x000000ff
+#	define RADEON_MAX_UV_ADR_SHIFT		 0
+#	define RADEON_TABLE1_BOT_ADR_MASK	 0x0000ff00
+#	define RADEON_TABLE1_BOT_ADR_SHIFT	 8
+#	define RADEON_TABLE3_TOP_ADR_MASK	 0x00ff0000
+#	define RADEON_TABLE3_TOP_ADR_SHIFT	 16
+#	define RADEON_HCODE_TABLE_SEL_MASK	 0x06000000
+#	define RADEON_HCODE_TABLE_SEL_SHIFT	 25
+#	define RADEON_VCODE_TABLE_SEL_MASK	 0x18000000
+#	define RADEON_VCODE_TABLE_SEL_SHIFT	 27
+#	define RADEON_TV_MAX_FIFO_ADDR		 0x1a7
+#	define RADEON_TV_MAX_FIFO_ADDR_INTERNAL	 0x1ff
+#define RADEON_TV_PLL_FINE_CNTL			     0x0020	/* PLL */
+#define RADEON_TV_PLL_CNTL                           0x0021	/* PLL */
+#       define RADEON_TV_M0LO_MASK               0xff
+#       define RADEON_TV_M0HI_MASK               0x7
+#       define RADEON_TV_M0HI_SHIFT              18
+#       define RADEON_TV_N0LO_MASK               0x1ff
+#       define RADEON_TV_N0LO_SHIFT              8
+#       define RADEON_TV_N0HI_MASK               0x3
+#       define RADEON_TV_N0HI_SHIFT              21
+#       define RADEON_TV_P_MASK                  0xf
+#       define RADEON_TV_P_SHIFT                 24
+#       define RADEON_TV_SLIP_EN                 (1 << 23)
+#       define RADEON_TV_DTO_EN                  (1 << 28)
+#define RADEON_TV_PLL_CNTL1                          0x0022	/* PLL */
+#       define RADEON_TVPLL_RESET                (1 <<  1)
+#       define RADEON_TVPLL_SLEEP                (1 <<  3)
+#       define RADEON_TVPLL_REFCLK_SEL           (1 <<  4)
+#       define RADEON_TVPCP_SHIFT                8
+#       define RADEON_TVPCP_MASK                 (7 << 8)
+#       define RADEON_TVPVG_SHIFT                11
+#       define RADEON_TVPVG_MASK                 (7 << 11)
+#       define RADEON_TVPDC_SHIFT                14
+#       define RADEON_TVPDC_MASK                 (3 << 14)
+#       define RADEON_TVPLL_TEST_DIS             (1U << 31)
+#       define RADEON_TVCLK_SRC_SEL_TVPLL        (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1			0xe30
+#       define RS400_DISP2_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP2_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP2_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP2_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP2_ALLOW_FID_LEVEL_MASK    0x3ff
+#define RS400_DISP2_REQ_CNTL2			0xe34
+#       define RS400_DISP2_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP2_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP2_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DMIF_MEM_CNTL1			0xe38
+#       define RS400_DISP2_START_ADR_SHIFT      0
+#       define RS400_DISP2_START_ADR_MASK       0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP1_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP1_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DISP1_REQ_CNTL1			0xe3c
+#       define RS400_DISP1_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP1_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP1_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP1_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP1_ALLOW_FID_LEVEL_MASK    0x3ff
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+#define RADEON_PCIE_TX_GART_ERROR	0x18
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+
+#define RV530_GB_PIPE_SELECT2           0x4124
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_ring.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_ring.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_ring.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,884 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_ring.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef FREEBSD_WIP
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored.  You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them.  Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+#endif /* FREEBSD_WIP */
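+
+/*
+ * Editor's sketch (not part of the original source): the typical IB
+ * life cycle using the helpers below, with error handling abbreviated;
+ * rdev is assumed from the caller's context, and the single padding
+ * dword used as a payload is only a placeholder.
+ */
+#if 0	/* example only */
+	struct radeon_ib ib;
+	int r;
+
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64 * 4);
+	if (r == 0) {
+		ib.ptr[0] = 0x80000000;	/* CP_PACKET2, a legacy nop */
+		ib.length_dw = 1;
+		r = radeon_ib_schedule(rdev, &ib, NULL);
+		radeon_ib_free(rdev, &ib);	/* releases the suballocation */
+	}
+#endif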
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics).  IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size)
+{
+	int i, r;
+
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+	if (r) {
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		return r;
+	}
+
+	r = radeon_semaphore_create(rdev, &ib->semaphore);
+	if (r) {
+		return r;
+	}
+
+	ib->ring = ring;
+	ib->fence = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->vm = vm;
+	if (vm) {
+		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+		 * space and soffset is the offset inside the pool bo
+		 */
+		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+	} else {
+		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	}
+	ib->is_const_ib = false;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		ib->sync_to[i] = NULL;
+
+	return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine).  Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed.  To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE.  If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	bool need_sync = false;
+	int i, r = 0;
+
+	if (!ib->length_dw || !ring->ready) {
+		/* TODO: nothing in the IB (or ring not ready); report which case failed. */
+		dev_err(rdev->dev, "couldn't schedule ib\n");
+		return -EINVAL;
+	}
+
+	/* 64 dwords should be enough for fence too */
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+	if (r) {
+		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_fence *fence = ib->sync_to[i];
+		if (radeon_fence_need_sync(fence, ib->ring)) {
+			need_sync = true;
+			radeon_semaphore_sync_rings(rdev, ib->semaphore,
+						    fence->ring, ib->ring);
+			radeon_fence_note_sync(fence, ib->ring);
+		}
+	}
+	/* immediately free semaphore when we don't need to sync */
+	if (!need_sync) {
+		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+	}
+	/* if we can't remember our last VM flush then flush now! */
+	/* XXX figure out why we have to flush for every IB */
+	if (ib->vm /*&& !ib->vm->last_flush*/) {
+		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+	}
+	if (const_ib) {
+		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+	}
+	radeon_ring_ib_execute(rdev, ib->ring, ib);
+	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	if (const_ib) {
+		const_ib->fence = radeon_fence_ref(ib->fence);
+	}
+	/* we just flushed the VM, remember that */
+	if (ib->vm && !ib->vm->last_flush) {
+		ib->vm->last_flush = radeon_fence_ref(ib->fence);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return 0;
+}
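+
+/*
+ * Editor's note (not original source): on SI a caller passes both IBs,
+ * e.g. radeon_ib_schedule(rdev, &ib, &const_ib); the CONST_IB is placed
+ * on the ring first, exactly as the comment above describes.  On earlier
+ * asics const_ib is simply NULL.
+ */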
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->ib_pool_ready) {
+		return 0;
+	}
+	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+				      RADEON_IB_POOL_SIZE*64*1024,
+				      RADEON_GEM_DOMAIN_GTT);
+	if (r) {
+		return r;
+	}
+
+	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+	if (r) {
+		return r;
+	}
+
+	rdev->ib_pool_ready = true;
+#ifdef FREEBSD_WIP
+	if (radeon_debugfs_sa_init(rdev)) {
+		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+	}
+#endif /* FREEBSD_WIP */
+	return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+		rdev->ib_pool_ready = false;
+	}
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ring = &rdev->ring[i];
+
+		if (!ring->ready)
+			continue;
+
+		r = radeon_ib_test(rdev, i, ring);
+		if (r) {
+			ring->ready = false;
+
+			if (i == RADEON_RING_TYPE_GFX_INDEX) {
+				/* oh, oh, that's really bad */
+				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+				rdev->accel_working = false;
+				return r;
+
+			} else {
+				/* still not good, but we can live with it */
+				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+			}
+		}
+	}
+	return 0;
+}
+
+#ifdef FREEBSD_WIP
+/*
+ * Rings
+ * Most engines on the GPU are fed via ring buffers.  Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written.  When the
+ * pointers are equal, the ring is idle.  When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr.  The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
+ */
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+#endif /* FREEBSD_WIP */
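+
+/*
+ * Editor's sketch (not original source): the host-side write path the
+ * comment above describes, using the helpers defined below; rdev and
+ * ring are assumed from context.  A caller reserves space, writes its
+ * dwords, then commits by bumping the wptr.
+ */
+#if 0	/* example only */
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r == 0) {
+		radeon_ring_write(ring, ring->nop);
+		radeon_ring_write(ring, ring->nop);
+		radeon_ring_unlock_commit(rdev, ring);	/* GPU starts fetching */
+	}
+#endif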
+
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+	if (ring->count_dw <= 0) {
+		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+	}
+#endif
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
+
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports
+ * writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring)
+{
+	switch (ring->idx) {
+	case RADEON_RING_TYPE_GFX_INDEX:
+	case CAYMAN_RING_TYPE_CP1_INDEX:
+	case CAYMAN_RING_TYPE_CP2_INDEX:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+	else
+		rptr = RREG32(ring->rptr_reg);
+	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+	/* This works because ring_size is a power of 2 */
+	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+	ring->ring_free_dw -= ring->wptr;
+	ring->ring_free_dw &= ring->ptr_mask;
+	if (!ring->ring_free_dw) {
+		ring->ring_free_dw = ring->ring_size / 4;
+	}
+}
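+
+/*
+ * Editor's note (not original source): a worked example of the mask
+ * arithmetic above.  With a 16 KB ring, ring_size / 4 = 4096 dwords and
+ * ptr_mask = 4095.  For rptr = 100 and wptr = 4000,
+ * (100 + 4096 - 4000) & 4095 = 196 free dwords.  When rptr == wptr the
+ * mask yields 0, so the final check resets ring_free_dw to the full
+ * 4096 (an idle ring is empty, not full).
+ */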
+
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	/* make sure we aren't trying to allocate more space than there is on the ring */
+	if (ndw > (ring->ring_size / 4))
+		return -ENOMEM;
+	/* Align requested size with padding so unlock_commit can
+	 * pad safely */
+	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	while (ndw > (ring->ring_free_dw - 1)) {
+		radeon_ring_free_size(rdev, ring);
+		if (ndw < ring->ring_free_dw) {
+			break;
+		}
+		r = radeon_fence_wait_next_locked(rdev, ring->idx);
+		if (r)
+			return r;
+	}
+	ring->count_dw = ndw;
+	ring->wptr_old = ring->wptr;
+	return 0;
+}
+
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	sx_xlock(&rdev->ring_lock);
+	r = radeon_ring_alloc(rdev, ring, ndw);
+	if (r) {
+		sx_xunlock(&rdev->ring_lock);
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	/* We pad to match fetch size */
+	while (ring->wptr & ring->align_mask) {
+		radeon_ring_write(ring, ring->nop);
+	}
+	DRM_MEMORYBARRIER();
+	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+	(void)RREG32(ring->wptr_reg);
+}
+
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_commit(rdev, ring);
+	sx_xunlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
+void radeon_ring_undo(struct radeon_ring *ring)
+{
+	ring->wptr = ring->wptr_old;
+}
+
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_undo(ring);
+	sx_xunlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	radeon_ring_free_size(rdev, ring);
+	if (ring->rptr == ring->wptr) {
+		r = radeon_ring_alloc(rdev, ring, 1);
+		if (!r) {
+			radeon_ring_write(ring, ring->nop);
+			radeon_ring_commit(rdev, ring);
+		}
+	}
+}
+
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+	ring->last_rptr = ring->rptr;
+	ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check if the ring is locked up
+ *
+ * @rdev:       radeon device structure
+ * @ring:       radeon_ring structure holding ring information
+ *
+ * The lockup tracking information needs no explicit initialization: either
+ * the CP rptr differs from the saved value or a jiffies wrap-around occurs,
+ * and both cases force the tracking information to be (re)initialized.
+ *
+ * A false positive is possible if we are called after a long delay while
+ * last_rptr happens to equal the current CP rptr; unlikely, but it can
+ * happen.  To avoid it, if more than two seconds have elapsed since the
+ * last call we return false and update the tracking information.  As a
+ * consequence, the caller must invoke radeon_ring_test_lockup() several
+ * times within two seconds for a lockup to be reported; the fencing code
+ * should be cautious about that.
+ *
+ * The caller should also write to the ring to force the CP to do something,
+ * so we don't get a false positive just because the CP has nothing to do.
+ */
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	unsigned long cjiffies, elapsed;
+	uint32_t rptr;
+
+	cjiffies = jiffies;
+	if (!time_after(cjiffies, ring->last_activity)) {
+		/* likely a wrap around */
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	rptr = RREG32(ring->rptr_reg);
+	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+	if (ring->rptr != ring->last_rptr) {
+		/* CP is still working no lockup */
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
+}
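+
+/*
+ * Editor's sketch (not original source): how a lockup check typically
+ * pairs the two helpers above, following the comment's advice to write
+ * to the ring before testing; rdev and ring are assumed from context.
+ */
+#if 0	/* example only */
+	radeon_ring_force_activity(rdev, ring);	/* give the CP work to do */
+	if (radeon_ring_test_lockup(rdev, ring)) {
+		/* the CP made no progress for too long; reset the GPU */
+	}
+#endif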
+
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ *
+ * Saves all unprocessed commands from a ring; returns the number of dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data)
+{
+	unsigned size, ptr, i;
+
+	/* just in case lock the ring */
+	sx_xlock(&rdev->ring_lock);
+	*data = NULL;
+
+	if (ring->ring_obj == NULL) {
+		sx_xunlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* it doesn't make sense to save anything if all fences are signaled */
+	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+		sx_xunlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* calculate the number of dw on the ring */
+	if (ring->rptr_save_reg)
+		ptr = RREG32(ring->rptr_save_reg);
+	else if (rdev->wb.enabled)
+		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+	else {
+		/* no way to read back the next rptr */
+		sx_xunlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	size = ring->wptr + (ring->ring_size / 4);
+	size -= ptr;
+	size &= ring->ptr_mask;
+	if (size == 0) {
+		sx_xunlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* and then save the content of the ring */
+	*data = malloc(size * sizeof(uint32_t), DRM_MEM_DRIVER, M_NOWAIT);
+	if (!*data) {
+		sx_xunlock(&rdev->ring_lock);
+		return 0;
+	}
+	for (i = 0; i < size; ++i) {
+		(*data)[i] = ring->ring[ptr++];
+		ptr &= ring->ptr_mask;
+	}
+
+	sx_xunlock(&rdev->ring_lock);
+	return size;
+}
+
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restores the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data)
+{
+	int i, r;
+
+	if (!size || !data)
+		return 0;
+
+	/* restore the saved ring content */
+	r = radeon_ring_lock(rdev, ring, size);
+	if (r)
+		return r;
+
+	for (i = 0; i < size; ++i) {
+		radeon_ring_write(ring, data[i]);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	free(data, DRM_MEM_DRIVER);
+	return 0;
+}
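+
+/*
+ * Editor's sketch (not original source): backup/restore are meant to
+ * bracket a GPU reset so that unprocessed commands survive it.  Note
+ * that radeon_ring_restore() frees the saved data on success.
+ */
+#if 0	/* example only */
+	uint32_t *data;
+	unsigned size;
+
+	size = radeon_ring_backup(rdev, ring, &data);	/* before the reset */
+	/* ... reset and reinitialize the GPU here ... */
+	radeon_ring_restore(rdev, ring, size, data);	/* frees data */
+#endif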
+
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+{
+	int r;
+	void *ring_ptr; /* FreeBSD: to please GCC 4.2. */
+
+	ring->ring_size = ring_size;
+	ring->rptr_offs = rptr_offs;
+	ring->rptr_reg = rptr_reg;
+	ring->wptr_reg = wptr_reg;
+	ring->ptr_reg_shift = ptr_reg_shift;
+	ring->ptr_reg_mask = ptr_reg_mask;
+	ring->nop = nop;
+	/* Allocate ring buffer */
+	if (ring->ring_obj == NULL) {
+		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     NULL, &ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(ring->ring_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_bo_unref(&ring->ring_obj);
+			return r;
+		}
+		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+					&ring->gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(ring->ring_obj);
+			radeon_bo_unref(&ring->ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+			return r;
+		}
+		ring_ptr = &ring->ring;
+		r = radeon_bo_kmap(ring->ring_obj,
+				       ring_ptr);
+		radeon_bo_unreserve(ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
+			radeon_bo_unref(&ring->ring_obj);
+			return r;
+		}
+	}
+	ring->ptr_mask = (ring->ring_size / 4) - 1;
+	ring->ring_free_dw = ring->ring_size / 4;
+	if (rdev->wb.enabled) {
+		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+	}
+#ifdef FREEBSD_WIP
+	if (radeon_debugfs_ring_init(rdev, ring)) {
+		DRM_ERROR("Failed to register debugfs file for rings !\n");
+	}
+#endif /* FREEBSD_WIP */
+	radeon_ring_lockup_update(ring);
+	return 0;
+}
+
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+	struct radeon_bo *ring_obj;
+
+	sx_xlock(&rdev->ring_lock);
+	ring_obj = ring->ring_obj;
+	ring->ready = false;
+	ring->ring = NULL;
+	ring->ring_obj = NULL;
+	sx_xunlock(&rdev->ring_lock);
+
+	if (ring_obj) {
+		r = radeon_bo_reserve(ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(ring_obj);
+			radeon_bo_unpin(ring_obj);
+			radeon_bo_unreserve(ring_obj);
+		}
+		radeon_bo_unref(&ring_obj);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ridx = *(int*)node->info_ent->data;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	unsigned count, i, j;
+	u32 tmp;
+
+	radeon_ring_free_size(rdev, ring);
+	count = (ring->ring_size / 4) - ring->ring_free_dw;
+	tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+	tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+	if (ring->rptr_save_reg) {
+		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+			   RREG32(ring->rptr_save_reg));
+	}
+	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+	/* print 32 dw before the current rptr, as the last executed
+	 * packet is often the root of the issue
+	 */
+	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+	for (j = 0; j <= (count + 32); j++) {
+		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+		i = (i + 1) & ring->ptr_mask;
+	}
+	return 0;
+}
+
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
+};
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+	return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+#ifdef FREEBSD_WIP
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+		unsigned r;
+
+		if (&rdev->ring[ridx] != ring)
+			continue;
+
+		r = radeon_debugfs_add_files(rdev, info, 1);
+		if (r)
+			return r;
+	}
+#endif
+	return 0;
+}
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+	return 0;
+#endif
+}
+#endif /* FREEBSD_WIP */


Property changes on: trunk/sys/dev/drm2/radeon/radeon_ring.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_sa.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_sa.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_sa.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,431 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse at freedesktop.org>
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole" and always try to allocate
+ * after it.  The principle is that, in a linear GPU ring progression,
+ * what comes after the last allocation is the oldest bo we allocated and
+ * thus the first one that should no longer be in use by the GPU.
+ *
+ * If that is not the case, we skip over the bo after last to the closest
+ * completed bo, if one exists.  If none exists and we are not asked to
+ * block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring;
+ * waiting for any one of those fences to complete is enough.
+ */
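+
+/*
+ * Editor's illustration (not original source) of the scheme above.  The
+ * manager's offset space behaves like a ring; "hole" tracks the most
+ * recently allocated bo and new allocations land right after it:
+ *
+ *     0                                            size
+ *     | bo C | bo D |      free      | bo A | bo B |
+ *            ^hole: last allocated
+ *
+ * Here A and B, which come after the hole in ring order, are the oldest
+ * allocations and the first expected to be retired by the GPU.
+ */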
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_sa.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+			      struct radeon_sa_manager *sa_manager,
+			      unsigned size, u32 domain)
+{
+	int i, r;
+
+	sx_init(&sa_manager->wq_lock, "drm__radeon_sa_manager_wq_mtx");
+	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
+	sa_manager->bo = NULL;
+	sa_manager->size = size;
+	sa_manager->domain = domain;
+	sa_manager->hole = &sa_manager->olist;
+	INIT_LIST_HEAD(&sa_manager->olist);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		INIT_LIST_HEAD(&sa_manager->flist[i]);
+	}
+
+	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+		return r;
+	}
+
+	return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (!list_empty(&sa_manager->olist)) {
+		sa_manager->hole = &sa_manager->olist;
+		radeon_sa_bo_try_free(sa_manager);
+		if (!list_empty(&sa_manager->olist)) {
+			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+		}
+	}
+	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+	radeon_bo_unref(&sa_manager->bo);
+	sa_manager->size = 0;
+	cv_destroy(&sa_manager->wq);
+	sx_destroy(&sa_manager->wq_lock);
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	/* map the buffer */
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(sa_manager->bo);
+		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	radeon_bo_unreserve(sa_manager->bo);
+	return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+				 struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (!r) {
+		radeon_bo_kunmap(sa_manager->bo);
+		radeon_bo_unpin(sa_manager->bo);
+		radeon_bo_unreserve(sa_manager->bo);
+	}
+	return r;
+}
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
+	if (sa_manager->hole == &sa_bo->olist) {
+		sa_manager->hole = sa_bo->olist.prev;
+	}
+	list_del_init(&sa_bo->olist);
+	list_del_init(&sa_bo->flist);
+	radeon_fence_unref(&sa_bo->fence);
+	free(sa_bo, DRM_MEM_DRIVER);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (sa_manager->hole->next == &sa_manager->olist)
+		return;
+
+	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+			return;
+		}
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole != &sa_manager->olist) {
+		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+	}
+	return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole->next != &sa_manager->olist) {
+		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+	}
+	return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+				   struct radeon_sa_bo *sa_bo,
+				   unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		soffset += wasted;
+
+		sa_bo->manager = sa_manager;
+		sa_bo->soffset = soffset;
+		sa_bo->eoffset = soffset + size;
+		list_add(&sa_bo->olist, sa_manager->hole);
+		INIT_LIST_HEAD(&sa_bo->flist);
+		sa_manager->hole = &sa_bo->olist;
+		return true;
+	}
+	return false;
+}
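+
+/*
+ * Editor's note (not original source): worked example of the alignment
+ * math above.  With soffset = 100 and align = 64,
+ * wasted = (64 - 100 % 64) % 64 = 28, so the allocation starts at 128.
+ * When soffset is already aligned, the outer "% align" makes wasted 0
+ * rather than a full align step.
+ */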
+
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+			    unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!list_empty(&sa_manager->flist[i])) {
+			return true;
+		}
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		return true;
+	}
+
+	return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+				   struct radeon_fence **fences,
+				   unsigned *tries)
+{
+	struct radeon_sa_bo *best_bo = NULL;
+	unsigned i, soffset, best, tmp;
+
+	/* if hole points to the end of the buffer */
+	if (sa_manager->hole->next == &sa_manager->olist) {
+		/* try again with its beginning */
+		sa_manager->hole = &sa_manager->olist;
+		return true;
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	/* to handle wrap around we add sa_manager->size */
+	best = sa_manager->size * 2;
+	/* go over all fence lists and try to find the sa_bo closest
+	 * to the current last
+	 */
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_sa_bo *sa_bo;
+
+		if (list_empty(&sa_manager->flist[i])) {
+			continue;
+		}
+
+		sa_bo = list_first_entry(&sa_manager->flist[i],
+					 struct radeon_sa_bo, flist);
+
+		if (!radeon_fence_signaled(sa_bo->fence)) {
+			fences[i] = sa_bo->fence;
+			continue;
+		}
+
+		/* limit the number of tries each ring gets */
+		if (tries[i] > 2) {
+			continue;
+		}
+
+		tmp = sa_bo->soffset;
+		if (tmp < soffset) {
+			/* wrap around, pretend it's after */
+			tmp += sa_manager->size;
+		}
+		tmp -= soffset;
+		if (tmp < best) {
+			/* this sa bo is the closest one */
+			best = tmp;
+			best_bo = sa_bo;
+		}
+	}
+
+	if (best_bo) {
+		++tries[best_bo->fence->ring];
+		sa_manager->hole = best_bo->olist.prev;
+
+		/* we know that this one is signaled,
+		   so it's safe to remove it */
+		radeon_sa_bo_remove_locked(best_bo);
+		return true;
+	}
+	return false;
+}
+
+int radeon_sa_bo_new(struct radeon_device *rdev,
+		     struct radeon_sa_manager *sa_manager,
+		     struct radeon_sa_bo **sa_bo,
+		     unsigned size, unsigned align, bool block)
+{
+	struct radeon_fence *fences[RADEON_NUM_RINGS];
+	unsigned tries[RADEON_NUM_RINGS];
+	int i, r;
+
+	KASSERT(align <= RADEON_GPU_PAGE_SIZE, ("align > RADEON_GPU_PAGE_SIZE"));
+	KASSERT(size <= sa_manager->size, ("size > sa_manager->size"));
+
+	*sa_bo = malloc(sizeof(struct radeon_sa_bo), DRM_MEM_DRIVER, M_NOWAIT);
+	if ((*sa_bo) == NULL) {
+		return -ENOMEM;
+	}
+	(*sa_bo)->manager = sa_manager;
+	(*sa_bo)->fence = NULL;
+	INIT_LIST_HEAD(&(*sa_bo)->olist);
+	INIT_LIST_HEAD(&(*sa_bo)->flist);
+
+	sx_xlock(&sa_manager->wq_lock);
+	do {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			fences[i] = NULL;
+			tries[i] = 0;
+		}
+
+		do {
+			radeon_sa_bo_try_free(sa_manager);
+
+			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+						   size, align)) {
+				sx_xunlock(&sa_manager->wq_lock);
+				return 0;
+			}
+
+			/* see if we can skip over some allocations */
+		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+		sx_xunlock(&sa_manager->wq_lock);
+		r = radeon_fence_wait_any(rdev, fences, false);
+		sx_xlock(&sa_manager->wq_lock);
+		/* if we have nothing to wait for block */
+		if (r == -ENOENT && block) {
+			while (!radeon_sa_event(sa_manager, size, align)) {
+				r = -cv_wait_sig(&sa_manager->wq,
+				    &sa_manager->wq_lock);
+				if (r == -EINTR)
+					r = -ERESTARTSYS;
+				if (r != 0)
+					break;
+			}
+
+		} else if (r == -ENOENT) {
+			r = -ENOMEM;
+		}
+
+	} while (!r);
+
+	sx_xunlock(&sa_manager->wq_lock);
+	free(*sa_bo, DRM_MEM_DRIVER);
+	*sa_bo = NULL;
+	return r;
+}
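+
+/*
+ * Editor's note (not original source): the loop above is the policy from
+ * the file header put into code: reclaim finished bos, try to allocate
+ * after the hole, skip ahead to the next completed bo, and only then
+ * block, waiting for any of the per-ring fences collected in fences[].
+ */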
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+		       struct radeon_fence *fence)
+{
+	struct radeon_sa_manager *sa_manager;
+
+	if (sa_bo == NULL || *sa_bo == NULL) {
+		return;
+	}
+
+	sa_manager = (*sa_bo)->manager;
+	sx_xlock(&sa_manager->wq_lock);
+	if (fence && !radeon_fence_signaled(fence)) {
+		(*sa_bo)->fence = radeon_fence_ref(fence);
+		list_add_tail(&(*sa_bo)->flist,
+			      &sa_manager->flist[fence->ring]);
+	} else {
+		radeon_sa_bo_remove_locked(*sa_bo);
+	}
+	cv_broadcast(&sa_manager->wq);
+	sx_xunlock(&sa_manager->wq_lock);
+	*sa_bo = NULL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+				  struct seq_file *m)
+{
+	struct radeon_sa_bo *i;
+
+	spin_lock(&sa_manager->wq.lock);
+	list_for_each_entry(i, &sa_manager->olist, olist) {
+		if (&i->olist == sa_manager->hole) {
+			seq_printf(m, ">");
+		} else {
+			seq_printf(m, " ");
+		}
+		seq_printf(m, "[0x%08x 0x%08x] size %8d",
+			   i->soffset, i->eoffset, i->eoffset - i->soffset);
+		if (i->fence) {
+			seq_printf(m, " protected by 0x%016llx on ring %d",
+				   i->fence->seq, i->fence->ring);
+		}
+		seq_printf(m, "\n");
+	}
+	spin_unlock(&sa_manager->wq.lock);
+}
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/radeon_sa.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_semaphore.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_semaphore.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_semaphore.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,125 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_semaphore.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+/*
+ * Authors:
+ *    Christian König <deathsimple at vodafone.de>
+ */
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore)
+{
+	int r;
+
+	*semaphore = malloc(sizeof(struct radeon_semaphore),
+	    DRM_MEM_DRIVER, M_NOWAIT);
+	if (*semaphore == NULL) {
+		return -ENOMEM;
+	}
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+			     &(*semaphore)->sa_bo, 8, 8, true);
+	if (r) {
+		free(*semaphore, DRM_MEM_DRIVER);
+		*semaphore = NULL;
+		return r;
+	}
+	(*semaphore)->waiters = 0;
+	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+	return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+			          struct radeon_semaphore *semaphore)
+{
+	--semaphore->waiters;
+	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+			        struct radeon_semaphore *semaphore)
+{
+	++semaphore->waiters;
+	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+/* caller must hold ring lock */
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				int signaler, int waiter)
+{
+	int r;
+
+	/* no need to signal and wait on the same ring */
+	if (signaler == waiter) {
+		return 0;
+	}
+
+	/* prevent GPU deadlocks */
+	if (!rdev->ring[signaler].ready) {
+		dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+		return -EINVAL;
+	}
+
+	r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+	if (r) {
+		return r;
+	}
+	radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+	radeon_ring_commit(rdev, &rdev->ring[signaler]);
+
+	/* we assume caller has already allocated space on waiters ring */
+	radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+
+	/* for debugging lockup only, used by sysfs debug files */
+	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
+	return 0;
+}
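
For orientation, the signal/wait pair above is meant to be driven roughly as
in the following sketch; the ring locking and the fence eventually handed to
radeon_semaphore_free() are assumed context, not taken from this diff:

    /* Hypothetical caller -- a sketch only, assuming the caller already
     * holds the ring lock and has reserved space on the waiter ring. */
    static int example_sync(struct radeon_device *rdev, int src, int dst)
    {
            struct radeon_semaphore *sem;
            int r;

            r = radeon_semaphore_create(rdev, &sem);
            if (r)
                    return r;
            r = radeon_semaphore_sync_rings(rdev, sem, src, dst);
            /* normally freed against a fence once both rings have
             * consumed it; NULL here just for the sketch */
            radeon_semaphore_free(rdev, &sem, NULL);
            return r;
    }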
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence)
+{
+	if (semaphore == NULL || *semaphore == NULL) {
+		return;
+	}
+	if ((*semaphore)->waiters > 0) {
+		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+			" hardware lockup imminent!\n", *semaphore);
+	}
+	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+	free(*semaphore, DRM_MEM_DRIVER);
+	*semaphore = NULL;
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_semaphore.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_state.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_state.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_state.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,3263 @@
+/* $MidnightBSD$ */
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth at valinux.com>
+ *    Kevin E. Martin <martin at valinux.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_state.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_buffer.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* ================================================================
+ * Helper functions for client state checking and fixup
+ */
+
+static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
+						    dev_priv,
+						    struct drm_file * file_priv,
+						    u32 *offset)
+{
+	u64 off = *offset;
+	u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	/* Hrm ... the story of the offset ... So this function converts
+	 * the various ideas of what userland clients might have for an
+	 * offset in the card address space into an offset into the card
+	 * address space :) So with a sane client, it should just keep
+	 * the value intact and just do some boundary checking. However,
+	 * not all clients are sane. Some older clients pass us 0 based
+	 * offsets relative to the start of the framebuffer and some may
+	 * assume the AGP aperture is appended to the framebuffer, so we
+	 * try to detect those cases and fix them up.
+	 *
+	 * Note: It might be a good idea here to make sure the offset lands
+	 * in some "allowed" area to protect things like the PCIE GART...
+	 */
+
+	/* First, the best case, the offset already lands in either the
+	 * framebuffer or the GART mapped space
+	 */
+	if (radeon_check_offset(dev_priv, off))
+		return 0;
+
+	/* Ok, that didn't happen... now check if we have a zero based
+	 * offset that fits in the framebuffer + gart space, apply the
+	 * magic offset we get from SETPARAM or calculated from fb_location
+	 */
+	if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+		radeon_priv = file_priv->driver_priv;
+		off += radeon_priv->radeon_fb_delta;
+	}
+
+	/* Finally, assume we aimed at a GART offset if beyond the fb */
+	if (off > fb_end)
+		off = off - fb_end - 1 + dev_priv->gart_vm_start;
+
+	/* Now recheck and fail if out of bounds */
+	if (radeon_check_offset(dev_priv, off)) {
+		DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
+		*offset = off;
+		return 0;
+	}
+	return -EINVAL;
+}
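
The fix-up arithmetic is easier to see with concrete numbers. A minimal
standalone sketch, assuming fb_location = 0x10000000, a 64 MB framebuffer,
and gart_vm_start directly above it (the per-file radeon_fb_delta is
simplified to fb_location here):

    #include <stdint.h>

    static uint64_t fixup_offset(uint64_t off)
    {
            const uint64_t fb_location = 0x10000000, fb_size = 64 << 20;
            const uint64_t gart_vm_start = fb_location + fb_size;
            const uint64_t fb_end = fb_location + fb_size - 1;

            if (off < fb_size)      /* legacy 0-based fb offset */
                    off += fb_location;
            if (off > fb_end)       /* past the fb: rebase into the GART */
                    off = off - fb_end - 1 + gart_vm_start;
            return off;
    }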
+
+static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
+						     dev_priv,
+						     struct drm_file *file_priv,
+						     int id, struct drm_buffer *buf)
+{
+	u32 *data;
+	switch (id) {
+
+	case RADEON_EMIT_PP_MISC:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+		dev_priv->have_z_offset = 1;
+		break;
+
+	case RADEON_EMIT_PP_CNTL:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid colour buffer offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_TXOFFSET_0:
+	case R200_EMIT_PP_TXOFFSET_1:
+	case R200_EMIT_PP_TXOFFSET_2:
+	case R200_EMIT_PP_TXOFFSET_3:
+	case R200_EMIT_PP_TXOFFSET_4:
+	case R200_EMIT_PP_TXOFFSET_5:
+		data = drm_buffer_pointer_to_dword(buf, 0);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid R200 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_EMIT_PP_TXFILTER_0:
+	case RADEON_EMIT_PP_TXFILTER_1:
+	case RADEON_EMIT_PP_TXFILTER_2:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid R100 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_CUBIC_OFFSETS_0:
+	case R200_EMIT_PP_CUBIC_OFFSETS_1:
+	case R200_EMIT_PP_CUBIC_OFFSETS_2:
+	case R200_EMIT_PP_CUBIC_OFFSETS_3:
+	case R200_EMIT_PP_CUBIC_OFFSETS_4:
+	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				data = drm_buffer_pointer_to_dword(buf, i);
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  data)) {
+					DRM_ERROR
+					    ("Invalid R200 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+			break;
+		}
+
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				data = drm_buffer_pointer_to_dword(buf, i);
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  data)) {
+					DRM_ERROR
+					    ("Invalid R100 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+		}
+		break;
+
+	case R200_EMIT_VAP_CTL:{
+			RING_LOCALS;
+			BEGIN_RING(2);
+			OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+			ADVANCE_RING();
+		}
+		break;
+
+	case RADEON_EMIT_RB3D_COLORPITCH:
+	case RADEON_EMIT_RE_LINE_PATTERN:
+	case RADEON_EMIT_SE_LINE_WIDTH:
+	case RADEON_EMIT_PP_LUM_MATRIX:
+	case RADEON_EMIT_PP_ROT_MATRIX_0:
+	case RADEON_EMIT_RB3D_STENCILREFMASK:
+	case RADEON_EMIT_SE_VPORT_XSCALE:
+	case RADEON_EMIT_SE_CNTL:
+	case RADEON_EMIT_SE_CNTL_STATUS:
+	case RADEON_EMIT_RE_MISC:
+	case RADEON_EMIT_PP_BORDER_COLOR_0:
+	case RADEON_EMIT_PP_BORDER_COLOR_1:
+	case RADEON_EMIT_PP_BORDER_COLOR_2:
+	case RADEON_EMIT_SE_ZBIAS_FACTOR:
+	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
+	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
+	case R200_EMIT_PP_TXCBLEND_0:
+	case R200_EMIT_PP_TXCBLEND_1:
+	case R200_EMIT_PP_TXCBLEND_2:
+	case R200_EMIT_PP_TXCBLEND_3:
+	case R200_EMIT_PP_TXCBLEND_4:
+	case R200_EMIT_PP_TXCBLEND_5:
+	case R200_EMIT_PP_TXCBLEND_6:
+	case R200_EMIT_PP_TXCBLEND_7:
+	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
+	case R200_EMIT_TFACTOR_0:
+	case R200_EMIT_VTX_FMT_0:
+	case R200_EMIT_MATRIX_SELECT_0:
+	case R200_EMIT_TEX_PROC_CTL_2:
+	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
+	case R200_EMIT_PP_TXFILTER_0:
+	case R200_EMIT_PP_TXFILTER_1:
+	case R200_EMIT_PP_TXFILTER_2:
+	case R200_EMIT_PP_TXFILTER_3:
+	case R200_EMIT_PP_TXFILTER_4:
+	case R200_EMIT_PP_TXFILTER_5:
+	case R200_EMIT_VTE_CNTL:
+	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
+	case R200_EMIT_PP_TAM_DEBUG3:
+	case R200_EMIT_PP_CNTL_X:
+	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
+	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
+	case R200_EMIT_RE_SCISSOR_TL_0:
+	case R200_EMIT_RE_SCISSOR_TL_1:
+	case R200_EMIT_RE_SCISSOR_TL_2:
+	case R200_EMIT_SE_VAP_CNTL_STATUS:
+	case R200_EMIT_SE_VTX_STATE_CNTL:
+	case R200_EMIT_RE_POINTSIZE:
+	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
+	case R200_EMIT_PP_CUBIC_FACES_0:
+	case R200_EMIT_PP_CUBIC_FACES_1:
+	case R200_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_CUBIC_FACES_3:
+	case R200_EMIT_PP_CUBIC_FACES_4:
+	case R200_EMIT_PP_CUBIC_FACES_5:
+	case RADEON_EMIT_PP_TEX_SIZE_0:
+	case RADEON_EMIT_PP_TEX_SIZE_1:
+	case RADEON_EMIT_PP_TEX_SIZE_2:
+	case R200_EMIT_RB3D_BLENDCOLOR:
+	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
+	case RADEON_EMIT_PP_CUBIC_FACES_0:
+	case RADEON_EMIT_PP_CUBIC_FACES_1:
+	case RADEON_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_TRI_PERF_CNTL:
+	case R200_EMIT_PP_AFS_0:
+	case R200_EMIT_PP_AFS_1:
+	case R200_EMIT_ATF_TFACTOR:
+	case R200_EMIT_PP_TXCTLALL_0:
+	case R200_EMIT_PP_TXCTLALL_1:
+	case R200_EMIT_PP_TXCTLALL_2:
+	case R200_EMIT_PP_TXCTLALL_3:
+	case R200_EMIT_PP_TXCTLALL_4:
+	case R200_EMIT_PP_TXCTLALL_5:
+	case R200_EMIT_VAP_PVS_CNTL:
+		/* These packets don't contain memory offsets */
+		break;
+
+	default:
+		DRM_ERROR("Unknown state packet ID %d\n", id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+					  dev_priv,
+					  struct drm_file *file_priv,
+					  drm_radeon_kcmd_buffer_t *
+					  cmdbuf,
+					  unsigned int *cmdsz)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	u32 offset, narrays;
+	int count, i, k;
+
+	count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+	*cmdsz = 2 + count;
+
+	if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
+		DRM_ERROR("Not a type 3 packet\n");
+		return -EINVAL;
+	}
+
+	if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR("Packet size larger than size of data provided\n");
+		return -EINVAL;
+	}
+
+	switch (*cmd & 0xff00) {
+	/* XXX Are there old drivers needing other packets? */
+
+	case RADEON_3D_DRAW_IMMD:
+	case RADEON_3D_DRAW_VBUF:
+	case RADEON_3D_DRAW_INDX:
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+	case RADEON_3D_CLEAR_ZMASK:
+/*	case RADEON_CP_NEXT_CHAR:
+	case RADEON_CP_PLY_NEXTSCAN:
+	case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
+		/* these packets are safe */
+		break;
+
+	case RADEON_CP_3D_DRAW_IMMD_2:
+	case RADEON_CP_3D_DRAW_VBUF_2:
+	case RADEON_CP_3D_DRAW_INDX_2:
+	case RADEON_3D_CLEAR_HIZ:
+		/* safe but r200 only */
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_LOAD_VBPNTR:
+
+		if (count > 18) { /* 12 arrays max */
+			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+				  count);
+			return -EINVAL;
+		}
+
+		/* carefully check packet contents */
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+		narrays = *cmd & ~0xc000;
+		k = 0;
+		i = 2;
+		while ((k < narrays) && (i < (count + 2))) {
+			i++;		/* skip attribute field */
+			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+							  cmd)) {
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+			if (k == narrays)
+				break;
+			/* have one more to process, they come in pairs */
+			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
+			if (radeon_check_and_fixup_offset(dev_priv,
+							  file_priv, cmd))
+			{
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+		}
+		/* do the counts match what we expect ? */
+		if ((k != narrays) || (i != (count + 2))) {
+			DRM_ERROR
+			    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+			      k, i, narrays, count + 1);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_RNDR_GEN_INDX_PRIM:
+		if (dev_priv->microcode_version != UCODE_R100) {
+			DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+			return -EINVAL;
+		}
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+				DRM_ERROR("Invalid rndr_gen_indx offset\n");
+				return -EINVAL;
+		}
+		break;
+
+	case RADEON_CP_INDX_BUFFER:
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if ((*cmd & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
+			return -EINVAL;
+		}
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_CNTL_HOSTDATA_BLT:
+	case RADEON_CNTL_PAINT_MULTI:
+	case RADEON_CNTL_BITBLT_MULTI:
+		/* MSB of opcode: next DWORD GUI_CNTL */
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+			offset = *cmd2 << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid first packet offset\n");
+				return -EINVAL;
+			}
+			*cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
+		}
+
+		if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid second packet offset\n");
+				return -EINVAL;
+			}
+			*cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
+		}
+		break;
+
+	default:
+		DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
+		return -EINVAL;
+	}
+
+	return 0;
+}
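
The size check above hinges on how a type-3 header is laid out: bits 31:30
carry the packet type and bits 29:16 the payload count. A standalone sketch
of the same decode (the mask values are the usual radeon definitions,
assumed here):

    #include <stdint.h>

    #define CP_PACKET3_TYPE         0xc0000000u  /* RADEON_CP_PACKET3 */
    #define CP_PACKET_COUNT_MASK    0x3fff0000u

    /* Returns total packet size in dwords, or -1 for a non-type-3
     * header: header dword + opcode dword + 'count' payload dwords. */
    static int packet3_size(uint32_t header)
    {
            if ((header & 0xc0000000u) != CP_PACKET3_TYPE)
                    return -1;
            return 2 + (int)((header & CP_PACKET_COUNT_MASK) >> 16);
    }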
+
+/* ================================================================
+ * CP hardware state programming functions
+ */
+
+static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+				  struct drm_clip_rect * box)
+{
+	RING_LOCALS;
+
+	DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
+		  box->x1, box->y1, box->x2, box->y2);
+
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
+	OUT_RING((box->y1 << 16) | box->x1);
+	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
+	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
+	ADVANCE_RING();
+}
+
+/* Emit 1.1 state
+ */
+static int radeon_emit_state(drm_radeon_private_t * dev_priv,
+			     struct drm_file *file_priv,
+			     drm_radeon_context_regs_t * ctx,
+			     drm_radeon_texture_regs_t * tex,
+			     unsigned int dirty)
+{
+	RING_LOCALS;
+	DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+	if (dirty & RADEON_UPLOAD_CONTEXT) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_depthoffset)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_coloroffset)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(14);
+		OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
+		OUT_RING(ctx->pp_misc);
+		OUT_RING(ctx->pp_fog_color);
+		OUT_RING(ctx->re_solid_color);
+		OUT_RING(ctx->rb3d_blendcntl);
+		OUT_RING(ctx->rb3d_depthoffset);
+		OUT_RING(ctx->rb3d_depthpitch);
+		OUT_RING(ctx->rb3d_zstencilcntl);
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
+		OUT_RING(ctx->pp_cntl);
+		OUT_RING(ctx->rb3d_cntl);
+		OUT_RING(ctx->rb3d_coloroffset);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
+		OUT_RING(ctx->rb3d_colorpitch);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VERTFMT) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
+		OUT_RING(ctx->se_coord_fmt);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_LINE) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
+		OUT_RING(ctx->re_line_pattern);
+		OUT_RING(ctx->re_line_state);
+		OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
+		OUT_RING(ctx->se_line_width);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_BUMPMAP) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
+		OUT_RING(ctx->pp_lum_matrix);
+		OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
+		OUT_RING(ctx->pp_rot_matrix_0);
+		OUT_RING(ctx->pp_rot_matrix_1);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MASKS) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
+		OUT_RING(ctx->rb3d_stencilrefmask);
+		OUT_RING(ctx->rb3d_ropcntl);
+		OUT_RING(ctx->rb3d_planemask);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VIEWPORT) {
+		BEGIN_RING(7);
+		OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
+		OUT_RING(ctx->se_vport_xscale);
+		OUT_RING(ctx->se_vport_xoffset);
+		OUT_RING(ctx->se_vport_yscale);
+		OUT_RING(ctx->se_vport_yoffset);
+		OUT_RING(ctx->se_vport_zscale);
+		OUT_RING(ctx->se_vport_zoffset);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_SETUP) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
+		OUT_RING(ctx->se_cntl);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
+		OUT_RING(ctx->se_cntl_status);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MISC) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
+		OUT_RING(ctx->re_misc);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX0) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[0].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 0\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
+		OUT_RING(tex[0].pp_txfilter);
+		OUT_RING(tex[0].pp_txformat);
+		OUT_RING(tex[0].pp_txoffset);
+		OUT_RING(tex[0].pp_txcblend);
+		OUT_RING(tex[0].pp_txablend);
+		OUT_RING(tex[0].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
+		OUT_RING(tex[0].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX1) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[1].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 1\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
+		OUT_RING(tex[1].pp_txfilter);
+		OUT_RING(tex[1].pp_txformat);
+		OUT_RING(tex[1].pp_txoffset);
+		OUT_RING(tex[1].pp_txcblend);
+		OUT_RING(tex[1].pp_txablend);
+		OUT_RING(tex[1].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
+		OUT_RING(tex[1].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX2) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[2].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 2\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
+		OUT_RING(tex[2].pp_txfilter);
+		OUT_RING(tex[2].pp_txformat);
+		OUT_RING(tex[2].pp_txoffset);
+		OUT_RING(tex[2].pp_txcblend);
+		OUT_RING(tex[2].pp_txablend);
+		OUT_RING(tex[2].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
+		OUT_RING(tex[2].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
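
The dirty mask is the whole contract here: the client ORs RADEON_UPLOAD_*
bits into it and only those register groups are replayed. A hypothetical
call, assuming a client that just touched the context registers and texture
unit 0:

    unsigned int dirty = RADEON_UPLOAD_CONTEXT | RADEON_UPLOAD_TEX0;
    int ret;

    /* only the two flagged register groups hit the ring; everything
     * else stays as the hardware last saw it */
    ret = radeon_emit_state(dev_priv, file_priv, &state->context,
                            state->tex, dirty);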
+
+/* Emit 1.2 state
+ */
+static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
+			      struct drm_file *file_priv,
+			      drm_radeon_state_t * state)
+{
+	RING_LOCALS;
+
+	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
+		BEGIN_RING(3);
+		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
+		OUT_RING(state->context2.se_zbias_factor);
+		OUT_RING(state->context2.se_zbias_constant);
+		ADVANCE_RING();
+	}
+
+	return radeon_emit_state(dev_priv, file_priv, &state->context,
+				 state->tex, state->dirty);
+}
+
+/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
+ * 1.3 cmdbuffers allow all previous state to be updated as well as
+ * the tcl scalar and vector areas.
+ */
+static struct {
+	int start;
+	int len;
+	const char *name;
+} packet[RADEON_MAX_STATE_PACKETS] = {
+	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
+	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
+	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
+	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
+	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
+	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
+	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
+	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
+	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
+	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
+	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
+	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
+	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
+	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
+	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
+	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
+	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
+	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
+	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
+	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
+	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
+		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
+	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
+	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
+	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
+	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
+	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
+	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
+	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
+	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
+	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
+	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
+	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
+	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
+	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
+	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
+	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
+	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
+	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
+	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
+	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
+	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
+	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
+	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
+	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
+	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
+	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
+	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
+	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
+	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
+	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
+	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
+	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
+	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
+	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
+	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
+	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
+	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
+	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
+	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
+	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
+		    "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
+	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
+	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
+	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
+	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
+	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
+	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
+	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
+	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
+	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
+	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
+	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
+	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
+	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
+	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
+	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
+	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
+	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
+	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
+	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
+	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
+	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
+	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
+	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
+	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
+	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
+	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
+	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
+	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
+	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
+	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
+	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
+	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
+	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
+};
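
Each entry pairs a start register with the number of dwords that packet
carries, so a validator can bounds-check an ID coming out of the command
stream before touching the payload. An illustrative lookup (the emit path
that actually consumes this table lives elsewhere in the file):

    int sz, reg;

    if (id >= RADEON_MAX_STATE_PACKETS) {
            DRM_ERROR("Unknown state packet ID %d\n", id);
            return -EINVAL;
    }
    sz  = packet[id].len;   /* payload dwords to copy and validate */
    reg = packet[id].start; /* first register the packet writes */
    DRM_DEBUG("%s: %d dwords at 0x%x\n", packet[id].name, sz, reg);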
+
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+			     struct drm_radeon_master_private *master_priv,
+			     int x, int y, int w, int h, int r, int g, int b)
+{
+	u32 color;
+	RING_LOCALS;
+
+	x += master_priv->sarea_priv->boxes[0].x1;
+	y += master_priv->sarea_priv->boxes[0].y1;
+
+	switch (dev_priv->color_fmt) {
+	case RADEON_COLOR_FORMAT_RGB565:
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+		break;
+	case RADEON_COLOR_FORMAT_ARGB8888:
+	default:
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+		break;
+	}
+
+	BEGIN_RING(4);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+	OUT_RING(0xffffffff);
+	ADVANCE_RING();
+
+	BEGIN_RING(6);
+
+	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+		 RADEON_GMC_BRUSH_SOLID_COLOR |
+		 (dev_priv->color_fmt << 8) |
+		 RADEON_GMC_SRC_DATATYPE_COLOR |
+		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+	if (master_priv->sarea_priv->pfCurrentPage == 1) {
+		OUT_RING(dev_priv->front_pitch_offset);
+	} else {
+		OUT_RING(dev_priv->back_pitch_offset);
+	}
+
+	OUT_RING(color);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((w << 16) | h);
+
+	ADVANCE_RING();
+}
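
The RGB565 packing in radeon_clear_box() drops the low bits of each channel
and repacks into 16 bits; a standalone helper, plus the value produced for
the purple flip box used below:

    #include <stdint.h>

    static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
    {
            return ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | ((b & 0xf8) >> 3);
    }

    /* pack_rgb565(255, 0, 255) == 0xf81f -- the purple flip box */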
+
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
+{
+	/* Collapse various things into a wait flag -- trying to
+	 * guess if userspace slept -- better just to have them tell us.
+	 */
+	if (dev_priv->stats.last_frame_reads > 1 ||
+	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	if (dev_priv->stats.freelist_loops) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	/* Purple box for page flipping
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
+		radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
+
+	/* Red box if we have to wait for idle at any point
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
+		radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
+
+	/* Blue box: lost context?
+	 */
+
+	/* Yellow box for texture swaps
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
+		radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
+
+	/* Green box if hardware never idles (as far as we can tell)
+	 */
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
+		radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
+
+	/* Draw bars indicating number of buffers allocated
+	 * (not a great measure, easily confused)
+	 */
+	if (dev_priv->stats.requested_bufs) {
+		if (dev_priv->stats.requested_bufs > 100)
+			dev_priv->stats.requested_bufs = 100;
+
+		radeon_clear_box(dev_priv, master_priv, 4, 16,
+				 dev_priv->stats.requested_bufs, 4,
+				 196, 128, 128);
+	}
+
+	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
+
+}
+
+/* ================================================================
+ * CP command dispatch functions
+ */
+
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
+				     struct drm_master *master,
+				     drm_radeon_clear_t * clear,
+				     drm_radeon_clear_rect_t * depth_boxes)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	unsigned int flags = clear->flags;
+	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("flags = 0x%x\n", flags);
+
+	dev_priv->stats.clears++;
+
+	if (sarea_priv->pfCurrentPage == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(RADEON_FRONT | RADEON_BACK);
+		if (tmp & RADEON_FRONT)
+			flags |= RADEON_BACK;
+		if (tmp & RADEON_BACK)
+			flags |= RADEON_FRONT;
+	}
+	if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+		if (!dev_priv->have_z_offset) {
+			DRM_ERROR("radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+			flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+		}
+	}
+
+	if (flags & (RADEON_FRONT | RADEON_BACK)) {
+
+		BEGIN_RING(4);
+
+		/* Ensure the 3D stream is idle before doing a
+		 * 2D fill to clear the front or back buffer.
+		 */
+		RADEON_WAIT_UNTIL_3D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+		OUT_RING(clear->color_mask);
+
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+			int x = pbox[i].x1;
+			int y = pbox[i].y1;
+			int w = pbox[i].x2 - x;
+			int h = pbox[i].y2 - y;
+
+			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
+				  x, y, w, h, flags);
+
+			if (flags & RADEON_FRONT) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->front_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+
+			if (flags & RADEON_BACK) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->back_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+		}
+	}
+
+	/* hyper z clear */
+	/* no docs available, based on reverse engineering by Stephane Marchesin */
+	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
+	    && (flags & RADEON_CLEAR_FASTZ)) {
+
+		int i;
+		int depthpixperline =
+		    dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z ?
+		    (dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);
+
+		u32 clearmask;
+
+		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
+		    ((clear->depth_mask & 0xff) << 24);
+
+		/* Make sure we restore the 3D state next time.
+		 * we haven't touched any "normal" state - still need this?
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (flags & RADEON_USE_HIERZ)) {
+			/* FIXME : reverse engineer that for Rx00 cards */
+			/* FIXME : the mask supposedly contains low-res z values. So can't set
+			   just to the max (0xff? or actually 0x3fff?), need to take z clear
+			   value into account? */
+			/* pattern seems to work for r100, though we get slight
+			   rendering errors with glxgears. If hierz is not enabled for r100,
+			   only the 4 bits which indicate clear (15,16,31,32, all zero) matter;
+			   the other ones are ignored, and the same clear mask can be used. That's
+			   very different behaviour from R200, which needs a different clear mask
+			   and a different number of tiles to clear depending on whether hierz
+			   is enabled !?!
+			 */
+			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
+		} else {
+			/* clear mask : chooses the clearing pattern.
+			   rv250: could be used to clear only parts of macrotiles
+			   (but that would get really complicated...)?
+			   bit 0 and 1 (either or both of them ?!?!) are used to
+			   not clear tile (or maybe one of the bits indicates if the tile is
+			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
+			   Pattern is as follows:
+			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
+			   bits -------------------------------------------------
+			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
+			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
+			   covers 256 pixels ?!?
+			 */
+			clearmask = 0x0;
+		}
+
+		BEGIN_RING(8);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
+			     tempRB3D_DEPTHCLEARVALUE);
+		/* what offset is this exactly ? */
+		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
+		/* need ctlstat, otherwise get some strange black flickering */
+		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
+			     RADEON_RB3D_ZC_FLUSH_ALL);
+		ADVANCE_RING();
+
+		for (i = 0; i < nbox; i++) {
+			int tileoffset, nrtilesx, nrtilesy, j;
+			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
+			if ((dev_priv->flags & RADEON_HAS_HIERZ)
+			    && !(dev_priv->microcode_version == UCODE_R200)) {
+				/* FIXME : figure this out for r200 (when hierz is enabled). Or
+				   maybe r200 actually doesn't need to put the low-res z value into
+				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
+				   Works for R100, both with hierz and without.
+				   R100 seems to operate on 2x1 8x8 tiles, but...
+				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
+				   problematic with resolutions which are not 64 pix aligned? */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					OUT_RING(tileoffset * 8);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			} else if (dev_priv->microcode_version == UCODE_R200) {
+				/* works for rv250. */
+				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 5;
+				nrtilesx =
+				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					/* judging by the first tile offset needed, could possibly
+					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
+					   macro tiles, though would still need clear mask for
+					   right/bottom if truly 4x4 granularity is desired ? */
+					OUT_RING(tileoffset * 16);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 1);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 5;
+				}
+			} else {	/* rv 100 */
+				/* rv100 might not need 64 pix alignment, who knows */
+				/* offsets are, hmm, weird */
+				tileoffset =
+				    ((pbox[i].y1 >> 4) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					OUT_RING(tileoffset * 128);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			}
+		}
+
+		/* TODO don't always clear all hi-level z tiles */
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (dev_priv->microcode_version == UCODE_R200)
+		    && (flags & RADEON_USE_HIERZ))
+			/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
+			/* FIXME : the mask supposedly contains low-res z values. So can't set
+			   just to the max (0xff? or actually 0x3fff?), need to take z clear
+			   value into account? */
+		{
+			BEGIN_RING(4);
+			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
+			OUT_RING(0x0);	/* First tile */
+			OUT_RING(0x3cc0);
+			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
+			ADVANCE_RING();
+		}
+	}
+
+	/* We have to clear the depth and/or stencil buffers by
+	 * rendering a quad into just those buffers.  Thus, we have to
+	 * make sure the 3D engine is configured correctly.
+	 */
+	else if ((dev_priv->microcode_version == UCODE_R200) &&
+		(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempPP_CNTL;
+		int tempRE_CNTL;
+		int tempRB3D_CNTL;
+		int tempRB3D_ZSTENCILCNTL;
+		int tempRB3D_STENCILREFMASK;
+		int tempRB3D_PLANEMASK;
+		int tempSE_CNTL;
+		int tempSE_VTE_CNTL;
+		int tempSE_VTX_FMT_0;
+		int tempSE_VTX_FMT_1;
+		int tempSE_VAP_CNTL;
+		int tempRE_AUX_SCISSOR_CNTL;
+
+		tempPP_CNTL = 0;
+		tempRE_CNTL = 0;
+
+		tempRB3D_CNTL = depth_clear->rb3d_cntl;
+
+		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+		tempRB3D_STENCILREFMASK = 0x0;
+
+		tempSE_CNTL = depth_clear->se_cntl;
+
+		/* Disable TCL */
+
+		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
+					  (0x9 <<
+					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
+
+		tempRB3D_PLANEMASK = 0x0;
+
+		tempRE_AUX_SCISSOR_CNTL = 0x0;
+
+		tempSE_VTE_CNTL =
+		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
+
+		/* Vertex format (X, Y, Z, W) */
+		tempSE_VTX_FMT_0 =
+		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
+		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
+		tempSE_VTX_FMT_1 = 0x0;
+
+		/*
+		 * Depth buffer specific enables
+		 */
+		if (flags & RADEON_DEPTH) {
+			/* Enable depth buffer */
+			tempRB3D_CNTL |= RADEON_Z_ENABLE;
+		} else {
+			/* Disable depth buffer */
+			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
+		}
+
+		/*
+		 * Stencil buffer specific enables
+		 */
+		if (flags & RADEON_STENCIL) {
+			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = clear->depth_mask;
+		} else {
+			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(26);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
+		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
+		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
+			     tempRB3D_STENCILREFMASK);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
+		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
+		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
+		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
+		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
+		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
+		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(14);
+			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			ADVANCE_RING();
+		}
+	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+
+		rb3d_cntl = depth_clear->rb3d_cntl;
+
+		if (flags & RADEON_DEPTH) {
+			rb3d_cntl |= RADEON_Z_ENABLE;
+		} else {
+			rb3d_cntl &= ~RADEON_Z_ENABLE;
+		}
+
+		if (flags & RADEON_STENCIL) {
+			rb3d_cntl |= RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
+		} else {
+			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(13);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
+		OUT_RING(0x00000000);
+		OUT_RING(rb3d_cntl);
+
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
+		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(15);
+
+			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
+			OUT_RING(RADEON_VTX_Z_PRESENT |
+				 RADEON_VTX_PKCOLOR_PRESENT);
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  RADEON_MAOS_ENABLE |
+				  RADEON_VTX_FMT_RADEON_MODE |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			ADVANCE_RING();
+		}
+	}
+
+	/* Increment the clear counter.  The client-side 3D driver must
+	 * wait on this value before performing the clear ioctl.  We
+	 * need this because the card's so damned fast...
+	 */
+	sarea_priv->last_clear++;
+
+	BEGIN_RING(4);
+
+	RADEON_CLEAR_AGE(sarea_priv->last_clear);
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+}
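
The last_clear age written via RADEON_CLEAR_AGE is what lets the client
throttle itself. A hypothetical client-side loop, assuming a helper that
reads the age back from the scratch register or SAREA (none of this is in
the diff itself):

    /* client-side sketch: don't submit another clear until the CP
     * has retired the previous one */
    while (read_clear_age(sarea) < my_last_clear)  /* hypothetical helper */
            usleep(1000);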
+
+static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes)
+		radeon_cp_performance_boxes(dev_priv, master_priv);
+
+	/* Wait for the 3D stream to idle before dispatching the bitblt.
+	 * This will prevent data corruption between the two streams.
+	 */
+	BEGIN_RING(2);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+
+	ADVANCE_RING();
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+		BEGIN_RING(9);
+
+		OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (dev_priv->color_fmt << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+
+		/* Make this work even if front & back are flipped:
+		 */
+		OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
+		if (sarea_priv->pfCurrentPage == 0) {
+			OUT_RING(dev_priv->back_pitch_offset);
+			OUT_RING(dev_priv->front_pitch_offset);
+		} else {
+			OUT_RING(dev_priv->front_pitch_offset);
+			OUT_RING(dev_priv->back_pitch_offset);
+		}
+
+		OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
+		OUT_RING((x << 16) | y);
+		OUT_RING((x << 16) | y);
+		OUT_RING((w << 16) | h);
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	sarea_priv->last_frame++;
+
+	BEGIN_RING(4);
+
+	RADEON_FRAME_AGE(sarea_priv->last_frame);
+	RADEON_WAIT_UNTIL_2D_IDLE();
+
+	ADVANCE_RING();
+}
+
+void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+	int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
+	    ? dev_priv->front_offset : dev_priv->back_offset;
+	RING_LOCALS;
+	DRM_DEBUG("pfCurrentPage=%d\n",
+		  master_priv->sarea_priv->pfCurrentPage);
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes) {
+		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
+		radeon_cp_performance_boxes(dev_priv, master_priv);
+	}
+
+	/* Update the frame offsets for both CRTCs
+	 */
+	BEGIN_RING(6);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING_REG(RADEON_CRTC_OFFSET,
+		     ((sarea->frame.y * dev_priv->front_pitch +
+		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
+		     + offset);
+	OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
+		     + offset);
+
+	ADVANCE_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	master_priv->sarea_priv->last_frame++;
+	master_priv->sarea_priv->pfCurrentPage =
+		1 - master_priv->sarea_priv->pfCurrentPage;
+
+	BEGIN_RING(2);
+
+	RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
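
The (dev_priv->color_fmt - 2) factor above reads as a bytes-per-pixel trick:
with the usual radeon format codes (RGB565 == 4, ARGB8888 == 6, assumed
here) it yields 2 or 4. The base-address math, restated as a sketch:

    /* sketch of the CRTC base math, assuming color_fmt - 2 == bpp;
     * pitch is in bytes, and & ~7 keeps the base 8-byte aligned */
    u32 base = ((frame_y * front_pitch + frame_x * (color_fmt - 2)) & ~7)
             + offset;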
+
+static int bad_prim_vertex_nr(int primitive, int nr)
+{
+	switch (primitive & RADEON_PRIM_TYPE_MASK) {
+	case RADEON_PRIM_TYPE_NONE:
+	case RADEON_PRIM_TYPE_POINT:
+		return nr < 1;
+	case RADEON_PRIM_TYPE_LINE:
+		return (nr & 1) || nr == 0;
+	case RADEON_PRIM_TYPE_LINE_STRIP:
+		return nr < 2;
+	case RADEON_PRIM_TYPE_TRI_LIST:
+	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
+	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
+	case RADEON_PRIM_TYPE_RECT_LIST:
+		return nr % 3 || nr == 0;
+	case RADEON_PRIM_TYPE_TRI_FAN:
+	case RADEON_PRIM_TYPE_TRI_STRIP:
+		return nr < 3;
+	default:
+		return 1;
+	}
+}
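
A few concrete cases of the validation above (plain asserts, just to pin
down the semantics):

    #include <assert.h>

    assert(!bad_prim_vertex_nr(RADEON_PRIM_TYPE_TRI_LIST, 6));  /* ok      */
    assert( bad_prim_vertex_nr(RADEON_PRIM_TYPE_TRI_LIST, 4));  /* not %3  */
    assert( bad_prim_vertex_nr(RADEON_PRIM_TYPE_LINE, 3));      /* odd nr  */
    assert(!bad_prim_vertex_nr(RADEON_PRIM_TYPE_TRI_STRIP, 3)); /* >= 3 ok */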
+
+typedef struct {
+	unsigned int start;
+	unsigned int finish;
+	unsigned int prim;
+	unsigned int numverts;
+	unsigned int offset;
+	unsigned int vc_format;
+} drm_radeon_tcl_prim_t;
+
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+				      struct drm_file *file_priv,
+				      struct drm_buf * buf,
+				      drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
+	int numverts = (int)prim->numverts;
+	int nbox = sarea_priv->nbox;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
+		  prim->prim,
+		  prim->vc_format, prim->start, prim->finish, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
+		DRM_ERROR("bad prim %x numverts %d\n",
+			  prim->prim, prim->numverts);
+		return;
+	}
+
+	do {
+		/* Emit the next cliprect */
+		if (i < nbox) {
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+		}
+
+		/* Emit the vertex buffer rendering commands */
+		BEGIN_RING(5);
+
+		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
+		OUT_RING(offset);
+		OUT_RING(numverts);
+		OUT_RING(prim->vc_format);
+		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
+			 RADEON_COLOR_ORDER_RGBA |
+			 RADEON_VTX_FMT_RADEON_MODE |
+			 (numverts << RADEON_NUM_VERTICES_SHIFT));
+
+		ADVANCE_RING();
+
+		i++;
+	} while (i < nbox);
+}
+
+void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+
+	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+
+	/* Emit the vertex buffer age */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		BEGIN_RING(3);
+		R600_DISPATCH_AGE(buf_priv->age);
+		ADVANCE_RING();
+	} else {
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(buf_priv->age);
+		ADVANCE_RING();
+	}
+
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+					struct drm_buf * buf, int start, int end)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+	if (start != end) {
+		int offset = (dev_priv->gart_buffers_offset
+			      + buf->offset + start);
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CP packet.
+		 */
+		if (dwords & 1) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = RADEON_CP_PACKET2;
+		}
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(3);
+
+		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+		OUT_RING(offset);
+		OUT_RING(dwords);
+
+		ADVANCE_RING();
+	}
+}
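
The sizing rule above -- round the byte range up to whole dwords, then pad
odd counts with a type-2 NOP -- in standalone form:

    /* sketch: bytes [start, end) -> dwords the CP will fetch */
    static int ib_fetch_dwords(int start, int end)
    {
            int dwords = (end - start + 3) / 4;     /* round up */
            if (dwords & 1)
                    dwords++;       /* RADEON_CP_PACKET2 filler dword */
            return dwords;
    }
    /* e.g. a 10-byte range -> 3 dwords -> padded to 4 */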
+
+static void radeon_cp_dispatch_indices(struct drm_device *dev,
+				       struct drm_master *master,
+				       struct drm_buf * elt_buf,
+				       drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + prim->offset;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+	int count = (prim->finish - start) / sizeof(u16);
+	int nbox = sarea_priv->nbox;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
+		  prim->prim,
+		  prim->vc_format,
+		  prim->start, prim->finish, prim->offset, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, count)) {
+		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
+		return;
+	}
+
+	if (start >= prim->finish || (prim->start & 0x7)) {
+		DRM_ERROR("buffer prim %d\n", prim->prim);
+		return;
+	}
+
+	dwords = (prim->finish - prim->start + 3) / sizeof(u32);
+
+	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+			elt_buf->offset + prim->start);
+
+	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
+	data[1] = offset;
+	data[2] = prim->numverts;
+	data[3] = prim->vc_format;
+	data[4] = (prim->prim |
+		   RADEON_PRIM_WALK_IND |
+		   RADEON_COLOR_ORDER_RGBA |
+		   RADEON_VTX_FMT_RADEON_MODE |
+		   (count << RADEON_NUM_VERTICES_SHIFT));
+
+	do {
+		if (i < nbox)
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+		radeon_cp_dispatch_indirect(dev, elt_buf,
+					    prim->start, prim->finish);
+
+		i++;
+	} while (i < nbox);
+
+}
+
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
+
+static int radeon_cp_dispatch_texture(struct drm_device * dev,
+				      struct drm_file *file_priv,
+				      drm_radeon_texture_t * tex,
+				      drm_radeon_tex_image_t * image)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	u32 format;
+	u32 *buffer;
+	const u8 __user *data;
+	int size, dwords, tex_width, blit_width, spitch;
+	u32 height;
+	int i;
+	u32 texpitch, microtile;
+	u32 offset, byte_offset;
+	RING_LOCALS;
+
+	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
+		DRM_ERROR("Invalid destination offset\n");
+		return -EINVAL;
+	}
+
+	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
+
+	/* Flush the pixel cache.  This ensures no pixel data gets mixed
+	 * up with the texture data from the host data blit, otherwise
+	 * part of the texture image may be corrupted.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+	ADVANCE_RING();
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch (tex->format) {
+	case RADEON_TXFORMAT_ARGB8888:
+	case RADEON_TXFORMAT_RGBA8888:
+		format = RADEON_COLOR_FORMAT_ARGB8888;
+		tex_width = tex->width * 4;
+		blit_width = image->width * 4;
+		break;
+	case RADEON_TXFORMAT_AI88:
+	case RADEON_TXFORMAT_ARGB1555:
+	case RADEON_TXFORMAT_RGB565:
+	case RADEON_TXFORMAT_ARGB4444:
+	case RADEON_TXFORMAT_VYUY422:
+	case RADEON_TXFORMAT_YVYU422:
+		format = RADEON_COLOR_FORMAT_RGB565;
+		tex_width = tex->width * 2;
+		blit_width = image->width * 2;
+		break;
+	case RADEON_TXFORMAT_I8:
+	case RADEON_TXFORMAT_RGB332:
+		format = RADEON_COLOR_FORMAT_CI8;
+		tex_width = tex->width * 1;
+		blit_width = image->width * 1;
+		break;
+	default:
+		DRM_ERROR("invalid texture format %d\n", tex->format);
+		return -EINVAL;
+	}
+	spitch = blit_width >> 6;
+	if (spitch == 0 && image->height > 1)
+		return -EINVAL;
+
+	texpitch = tex->pitch;
+	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
+		microtile = 1;
+		if (tex_width < 64) {
+			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
+			/* we got tiled coordinates, untile them */
+			image->x *= 2;
+		}
+	} else
+		microtile = 0;
+
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
+				blit_width - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
+
+	do {
+		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%u y=%u w=%u h=%u\n",
+			  tex->offset >> 10, tex->pitch, tex->format,
+			  image->x, image->y, image->width, image->height);
+
+		/* Make a copy of some parameters in case we have to
+		 * update them for a multi-pass texture blit.
+		 */
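+		/* Uploads larger than RADEON_MAX_TEXTURE_SIZE are split
+		 * into horizontal strips: height is clamped just below, and
+		 * image->y, image->height and image->data are advanced at
+		 * the bottom of this loop for the next pass.
+		 */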
+		height = image->height;
+		data = (const u8 __user *)image->data;
+
+		size = height * blit_width;
+
+		if (size > RADEON_MAX_TEXTURE_SIZE) {
+			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+			size = height * blit_width;
+		} else if (size < 4 && size > 0) {
+			size = 4;
+		} else if (size == 0) {
+			return 0;
+		}
+
+		buf = radeon_freelist_get(dev);
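+		/* The idle-and-retry fallback below is disabled by the
+		 * "0 &&" guard; when no buffer is free we hand -EAGAIN back
+		 * to user space rather than stalling on the CP.
+		 */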
+		if (0 && !buf) {
+			radeon_do_cp_idle(dev_priv);
+			buf = radeon_freelist_get(dev);
+		}
+		if (!buf) {
+			DRM_DEBUG("EAGAIN\n");
+			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+				return -EFAULT;
+			return -EAGAIN;
+		}
+
+		/* Dispatch the indirect buffer.
+		 */
+		buffer =
+		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+		dwords = size / 4;
+
+#define RADEON_COPY_MT(_buf, _data, _width) \
+	do { \
+		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+			return -EFAULT; \
+		} \
+	} while(0)
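+/* RADEON_COPY_MT(dst, src, width) copies width bytes in from user space
+ * and makes the enclosing function return -EFAULT on a faulting copy,
+ * so it may only be used where an early return is safe.
+ */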
+
+		if (microtile) {
+			/* Texture micro-tiling is in use, so the minimum
+			 * texture width is 16 bytes.  However, we cannot use
+			 * the blitter directly for texture widths below 64
+			 * bytes, since the minimum texture pitch is 64 bytes
+			 * and it must match the texture width or the blitter
+			 * will tile it wrong.  Thus we tile manually in that
+			 * case.  We also special-case tex height == 1, since
+			 * the actual image will have height 2 and we must
+			 * not read beyond the texture size from user space. */
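+			/* Example: with tex_width == 32, two source rows
+			 * share each 64-byte tile line, so row i's two
+			 * 16-byte halves land at dword offsets 0 and 8 and
+			 * row i+1's at offsets 4 and 12 in the loop further
+			 * below.
+			 */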
+			if (tex->height == 1) {
+				if (tex_width >= 64 || tex_width <= 16) {
+					RADEON_COPY_MT(buffer, data,
+						(int)(tex_width * sizeof(u32)));
+				} else if (tex_width == 32) {
+					RADEON_COPY_MT(buffer, data, 16);
+					RADEON_COPY_MT(buffer + 8,
+						       data + 16, 16);
+				}
+			} else if (tex_width >= 64 || tex_width == 16) {
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else if (tex_width < 16) {
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 4;
+					data += tex_width;
+				}
+			} else if (tex_width == 32) {
+				/* TODO: make sure this works when not fitting in one buffer
+				   (i.e. 32bytes x 2048...) */
+				for (i = 0; i < tex->height; i += 2) {
+					RADEON_COPY_MT(buffer, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 8, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 4, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 12, data, 16);
+					data += 16;
+					buffer += 16;
+				}
+			}
+		} else {
+			if (tex_width >= 32) {
+				/* Texture image width is larger than the minimum, so we
+				 * can upload it directly.
+				 */
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else {
+				/* Texture image width is less than the minimum, so we
+				 * need to pad out each image scanline to the minimum
+				 * width.
+				 */
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 8;
+					data += tex_width;
+				}
+			}
+		}
+
+#undef RADEON_COPY_MT
+		byte_offset = (image->y & ~2047) * blit_width;
+		buf->file_priv = file_priv;
+		buf->used = size;
+		offset = dev_priv->gart_buffers_offset + buf->offset;
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (format << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+		OUT_RING((spitch << 22) | (offset >> 10));
+		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
+		OUT_RING(0);
+		OUT_RING((image->x << 16) | (image->y % 2048));
+		OUT_RING((image->width << 16) | height);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		COMMIT_RING();
+
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+		/* Update the input parameters for next time */
+		image->y += height;
+		image->height -= height;
+		image->data = (const u8 __user *)image->data + size;
+	} while (image->height > 0);
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_2D_IDLE();
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return 0;
+}
+
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(35);
+
+	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
+	OUT_RING(0x00000000);
+
+	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
+	for (i = 0; i < 32; i++) {
+		OUT_RING(stipple[i]);
+	}
+
+	ADVANCE_RING();
+}
+
+static void radeon_apply_surface_regs(int surf_index,
+				      drm_radeon_private_t *dev_priv)
+{
+	if (!dev_priv->mmio)
+		return;
+
+	radeon_do_cp_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].flags);
+	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].lower);
+	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].upper);
+}
+
+/* Allocate a virtual surface.
+ *
+ * This doesn't always allocate a real surface; it will stretch an
+ * existing surface when possible.
+ *
+ * Note that refcount is capped at 2: if a real surface backed three
+ * virtual surfaces, freeing the middle one could force us to allocate
+ * a new real surface, which might not always be available.
+ * For example: we allocate three contiguous surfaces ABC.  If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+			 drm_radeon_private_t *dev_priv,
+			 struct drm_file *file_priv)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	int virt_surface_index;
+	uint32_t new_upper, new_lower;
+
+	new_lower = new->address;
+	new_upper = new_lower + new->size - 1;
+
+	/* sanity check */
+	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
+	     RADEON_SURF_ADDRESS_FIXED_MASK)
+	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+		return -1;
+
+	/* make sure there is no overlap with existing surfaces */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if ((dev_priv->surfaces[i].refcount != 0) &&
+		    (((new_lower >= dev_priv->surfaces[i].lower) &&
+		      (new_lower < dev_priv->surfaces[i].upper)) ||
+		     ((new_lower < dev_priv->surfaces[i].lower) &&
+		      (new_upper > dev_priv->surfaces[i].lower)))) {
+			return -1;
+		}
+	}
+
+	/* find a virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
+		if (dev_priv->virt_surfaces[i].file_priv == NULL)
+			break;
+	if (i == 2 * RADEON_MAX_SURFACES) {
+		return -1;
+	}
+	virt_surface_index = i;
+
+	/* try to reuse an existing surface */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		/* extend before */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].lower = s->lower;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+
+		/* extend after */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].upper = s->upper;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* okay, we need a new one */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->surfaces[i].refcount == 0) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount = 1;
+			dev_priv->surfaces[i].lower = s->lower;
+			dev_priv->surfaces[i].upper = s->upper;
+			dev_priv->surfaces[i].flags = s->flags;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* we didn't find anything */
+	return -1;
+}
+
+static int free_surface(struct drm_file *file_priv,
+			drm_radeon_private_t * dev_priv,
+			int lower)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	/* find the virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		s = &(dev_priv->virt_surfaces[i]);
+		if (s->file_priv) {
+			if ((lower == s->lower) && (file_priv == s->file_priv))
+			{
+				if (dev_priv->surfaces[s->surface_index].
+				    lower == s->lower)
+					dev_priv->surfaces[s->surface_index].
+					    lower = s->upper;
+
+				if (dev_priv->surfaces[s->surface_index].
+				    upper == s->upper)
+					dev_priv->surfaces[s->surface_index].
+					    upper = s->lower;
+
+				dev_priv->surfaces[s->surface_index].refcount--;
+				if (dev_priv->surfaces[s->surface_index].
+				    refcount == 0)
+					dev_priv->surfaces[s->surface_index].
+					    flags = 0;
+				s->file_priv = NULL;
+				radeon_apply_surface_regs(s->surface_index,
+							  dev_priv);
+				return 0;
+			}
+		}
+	}
+	return 1;
+}
+
+static void radeon_surfaces_release(struct drm_file *file_priv,
+				    drm_radeon_private_t * dev_priv)
+{
+	int i;
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->virt_surfaces[i].file_priv == file_priv)
+			free_surface(file_priv, dev_priv,
+				     dev_priv->virt_surfaces[i].lower);
+	}
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_alloc_t *alloc = data;
+
+	if (alloc_surface(alloc, dev_priv, file_priv) == -1)
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_free_t *memfree = data;
+
+	if (free_surface(file_priv, dev_priv, memfree->address))
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	drm_radeon_clear_t *clear = data;
+	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+			       sarea_priv->nbox * sizeof(depth_boxes[0])))
+		return -EFAULT;
+
+	radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
+
+	COMMIT_RING();
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	ADVANCE_RING();
+
+	dev_priv->page_flipping = 1;
+
+	if (master_priv->sarea_priv->pfCurrentPage != 1)
+		master_priv->sarea_priv->pfCurrentPage = 0;
+
+	return 0;
+}
+
+/* Swapping and flipping are different operations, need different ioctls.
+ * They can & should be intermixed to support multiple 3d windows.
+ */
+static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (!dev_priv->page_flipping)
+		radeon_do_init_pageflip(dev, file_priv->master);
+
+	radeon_cp_dispatch_flip(dev, file_priv->master);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_cp_dispatch_swap(dev, file_priv);
+	else
+		radeon_cp_dispatch_swap(dev, file_priv->master);
+	sarea_priv->ctx_owner = 0;
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex_t *vertex = data;
+	drm_radeon_tcl_prim_t prim;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", vertex->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	/* Build up a prim_t record:
+	 */
+	if (vertex->count) {
+		buf->used = vertex->count;	/* not used? */
+
+		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+			if (radeon_emit_state(dev_priv, file_priv,
+					      &sarea_priv->context_state,
+					      sarea_priv->tex_state,
+					      sarea_priv->dirty)) {
+				DRM_ERROR("radeon_emit_state failed\n");
+				return -EINVAL;
+			}
+
+			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+					       RADEON_UPLOAD_TEX1IMAGES |
+					       RADEON_UPLOAD_TEX2IMAGES |
+					       RADEON_REQUIRE_QUIESCENCE);
+		}
+
+		prim.start = 0;
+		prim.finish = vertex->count;	/* unused */
+		prim.prim = vertex->prim;
+		prim.numverts = vertex->count;
+		prim.vc_format = sarea_priv->vc_format;
+
+		radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indices_t *elts = data;
+	drm_radeon_tcl_prim_t prim;
+	int count;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
+		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
+		  elts->discard);
+
+	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  elts->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", elts->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[elts->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", elts->idx);
+		return -EINVAL;
+	}
+
+	count = (elts->end - elts->start) / sizeof(u16);
+	elts->start -= RADEON_INDEX_PRIM_OFFSET;
+
+	if (elts->start & 0x7) {
+		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+		return -EINVAL;
+	}
+	if (elts->start < buf->used) {
+		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+		return -EINVAL;
+	}
+
+	buf->used = elts->end;
+
+	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+		if (radeon_emit_state(dev_priv, file_priv,
+				      &sarea_priv->context_state,
+				      sarea_priv->tex_state,
+				      sarea_priv->dirty)) {
+			DRM_ERROR("radeon_emit_state failed\n");
+			return -EINVAL;
+		}
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
+
+	/* Build up a prim_t record:
+	 */
+	prim.start = elts->start;
+	prim.finish = elts->end;
+	prim.prim = elts->prim;
+	prim.offset = 0;	/* offset from start of dma buffers */
+	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+	prim.vc_format = sarea_priv->vc_format;
+
+	radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
+	if (elts->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_texture_t *tex = data;
+	drm_radeon_tex_image_t image;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (tex->image == NULL) {
+		DRM_ERROR("null texture image!\n");
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_FROM_USER(&image,
+			       (drm_radeon_tex_image_t __user *) tex->image,
+			       sizeof(image)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
+	else
+		ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
+
+	return ret;
+}
+
+static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_stipple_t *stipple = data;
+	u32 mask[32];
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	radeon_cp_dispatch_stipple(dev, mask);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indirect_t *indirect = data;
+	RING_LOCALS;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+		  indirect->idx, indirect->start, indirect->end,
+		  indirect->discard);
+
+	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  indirect->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[indirect->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+		return -EINVAL;
+	}
+
+	if (indirect->start < buf->used) {
+		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+			  indirect->start, buf->used);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf->used = indirect->end;
+
+	/* Dispatch the indirect buffer full of commands from the
+	 * X server.  This is insecure and is thus only available to
+	 * privileged clients.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+	else {
+		/* Wait for the 3D stream to idle before the indirect buffer
+		 * containing 2D acceleration commands is processed.
+		 */
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_3D_IDLE();
+		ADVANCE_RING();
+		radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+	}
+
+	if (indirect->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex2_t *vertex = data;
+	int i;
+	unsigned char laststate;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		return -EINVAL;
+
+	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
+		drm_radeon_prim_t prim;
+		drm_radeon_tcl_prim_t tclprim;
+
+		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+			return -EFAULT;
+
+		if (prim.stateidx != laststate) {
+			drm_radeon_state_t state;
+
+			if (DRM_COPY_FROM_USER(&state,
+					       &vertex->state[prim.stateidx],
+					       sizeof(state)))
+				return -EFAULT;
+
+			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
+				DRM_ERROR("radeon_emit_state2 failed\n");
+				return -EINVAL;
+			}
+
+			laststate = prim.stateidx;
+		}
+
+		tclprim.start = prim.start;
+		tclprim.finish = prim.finish;
+		tclprim.prim = prim.prim;
+		tclprim.vc_format = prim.vc_format;
+
+		if (prim.prim & RADEON_PRIM_WALK_IND) {
+			tclprim.offset = prim.numverts * 64;
+			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+
+			radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
+		} else {
+			tclprim.numverts = prim.numverts;
+			tclprim.offset = 0;	/* not used */
+
+			radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
+		}
+
+		if (sarea_priv->nbox == 1)
+			sarea_priv->nbox = 0;
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
+			       struct drm_file *file_priv,
+			       drm_radeon_cmd_header_t header,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int id = (int)header.packet.packet_id;
+	int sz, reg;
+	RING_LOCALS;
+
+	if (id >= RADEON_MAX_STATE_PACKETS)
+		return -EINVAL;
+
+	sz = packet[id].len;
+	reg = packet[id].start;
+
+	if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR("Packet size provided larger than data provided\n");
+		return -EINVAL;
+	}
+
+	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+				cmdbuf->buffer)) {
+		DRM_ERROR("Packet verification failed\n");
+		return -EINVAL;
+	}
+
+	BEGIN_RING(sz + 1);
+	OUT_RING(CP_PACKET0(reg, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = header.scalars.offset;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+	return 0;
+}
+
+/* God this is ugly
+ */
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
+					   drm_radeon_cmd_header_t header,
+					   drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = ((unsigned int)header.scalars.offset) + 0x100;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+	return 0;
+}
+
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.vectors.count;
+	int start = header.vectors.offset;
+	int stride = header.vectors.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.veclinear.count * 4;
+	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
+	RING_LOCALS;
+
+	if (!sz)
+		return 0;
+	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int radeon_emit_packet3(struct drm_device * dev,
+			       struct drm_file *file_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int cmdsz;
+	int ret;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	BEGIN_RING(cmdsz);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+					struct drm_file *file_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					int orig_nbox)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect box;
+	unsigned int cmdsz;
+	int ret;
+	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	if (!orig_nbox)
+		goto out;
+
+	do {
+		if (i < cmdbuf->nbox) {
+			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+				return -EFAULT;
+			/* FIXME The second and subsequent times round
+			 * this loop, send a WAIT_UNTIL_3D_IDLE before
+			 * calling emit_clip_rect(). This fixes a
+			 * lockup on fast machines when sending
+			 * several cliprects with a cmdbuf, as when
+			 * waving a 2D window over a 3D
+			 * window. Something in the commands from user
+			 * space seems to hang the card when they're
+			 * sent several times in a row. That would be
+			 * the correct place to fix it but this works
+			 * around it until I can figure that out - Tim
+			 * Smith */
+			if (i) {
+				BEGIN_RING(2);
+				RADEON_WAIT_UNTIL_3D_IDLE();
+				ADVANCE_RING();
+			}
+			radeon_emit_clip_rect(dev_priv, &box);
+		}
+
+		BEGIN_RING(cmdsz);
+		OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+		ADVANCE_RING();
+
+	} while (++i < cmdbuf->nbox);
+	if (cmdbuf->nbox == 1)
+		cmdbuf->nbox = 0;
+
+	return 0;
+      out:
+	drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
+	return 0;
+}
+
+static int radeon_emit_wait(struct drm_device * dev, int flags)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%x\n", flags);
+	switch (flags) {
+	case RADEON_WAIT_2D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_3D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_2D | RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_IDLE();
+		ADVANCE_RING();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	drm_radeon_cmd_header_t stack_header;
+	int idx;
+	drm_radeon_kcmd_buffer_t *cmdbuf = data;
+	int orig_nbox;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
+		return -EINVAL;
+	}
+
+	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
+	 * races between checking values and using those values in other code,
+	 * and simply to avoid a lot of function calls to copy in data.
+	 */
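+	/* Working from a stable kernel-resident copy also closes the
+	 * time-of-check-to-time-of-use window: user space cannot rewrite
+	 * a packet between the verification below and its emission to
+	 * the ring.
+	 */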
+	if (cmdbuf->bufsz != 0) {
+		int rv;
+		void __user *buffer = cmdbuf->buffer;
+		rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+		if (rv)
+			return rv;
+		rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+						cmdbuf->bufsz);
+		if (rv) {
+			drm_buffer_free(cmdbuf->buffer);
+			return rv;
+		}
+	} else
+		goto done;
+
+	orig_nbox = cmdbuf->nbox;
+
+	if (dev_priv->microcode_version == UCODE_R300) {
+		int temp;
+		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
+
+		drm_buffer_free(cmdbuf->buffer);
+
+		return temp;
+	}
+
+	/* microcode_version != r300 */
+	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
+
+		drm_radeon_cmd_header_t *header;
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		switch (header->header.cmd_type) {
+		case RADEON_CMD_PACKET:
+			DRM_DEBUG("RADEON_CMD_PACKET\n");
+			if (radeon_emit_packets
+			    (dev_priv, file_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packets failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS:
+			DRM_DEBUG("RADEON_CMD_SCALARS\n");
+			if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_VECTORS:
+			DRM_DEBUG("RADEON_CMD_VECTORS\n");
+			if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_vectors failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header->dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				goto err;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				goto err;
+			}
+
+			radeon_cp_discard_buffer(dev, file_priv->master, buf);
+			break;
+
+		case RADEON_CMD_PACKET3:
+			DRM_DEBUG("RADEON_CMD_PACKET3\n");
+			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packet3 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_PACKET3_CLIP:
+			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
+			if (radeon_emit_packet3_cliprect
+			    (dev, file_priv, cmdbuf, orig_nbox)) {
+				DRM_ERROR("radeon_emit_packet3_clip failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS2:
+			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
+			if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars2 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_WAIT:
+			DRM_DEBUG("RADEON_CMD_WAIT\n");
+			if (radeon_emit_wait(dev, header->wait.flags)) {
+				DRM_ERROR("radeon_emit_wait failed\n");
+				goto err;
+			}
+			break;
+		case RADEON_CMD_VECLINEAR:
+			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
+			if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_veclinear failed\n");
+				goto err;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad cmd_type %d at byte %d\n",
+				  header->header.cmd_type,
+				  cmdbuf->buffer->iterator);
+			goto err;
+		}
+	}
+
+	drm_buffer_free(cmdbuf->buffer);
+
+      done:
+	DRM_DEBUG("DONE\n");
+	COMMIT_RING();
+	return 0;
+
+      err:
+	drm_buffer_free(cmdbuf->buffer);
+	return -EINVAL;
+}
+
+static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_getparam_t *param = data;
+	int value;
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case RADEON_PARAM_GART_BUFFER_OFFSET:
+		value = dev_priv->gart_buffers_offset;
+		break;
+	case RADEON_PARAM_LAST_FRAME:
+		dev_priv->stats.last_frame_reads++;
+		value = GET_SCRATCH(dev_priv, 0);
+		break;
+	case RADEON_PARAM_LAST_DISPATCH:
+		value = GET_SCRATCH(dev_priv, 1);
+		break;
+	case RADEON_PARAM_LAST_CLEAR:
+		dev_priv->stats.last_clear_reads++;
+		value = GET_SCRATCH(dev_priv, 2);
+		break;
+	case RADEON_PARAM_IRQ_NR:
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			value = 0;
+		else
+			value = dev->irq;
+		break;
+	case RADEON_PARAM_GART_BASE:
+		value = dev_priv->gart_vm_start;
+		break;
+	case RADEON_PARAM_REGISTER_HANDLE:
+		value = dev_priv->mmio->offset;
+		break;
+	case RADEON_PARAM_STATUS_HANDLE:
+		value = dev_priv->ring_rptr_offset;
+		break;
+#if BITS_PER_LONG == 32
+		/*
+		 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
+		 * pointer which can't fit into an int-sized variable.  According to
+		 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
+		 * not supporting it shouldn't be a problem.  If the same functionality
+		 * is needed on 64-bit platforms, a new ioctl() would have to be added,
+		 * so backwards-compatibility for the embedded platforms can be
+		 * maintained.  --davidm 4-Feb-2004.
+		 */
+	case RADEON_PARAM_SAREA_HANDLE:
+		/* The lock is the first dword in the sarea. */
+		/* no users of this parameter */
+		break;
+#endif
+	case RADEON_PARAM_GART_TEX_HANDLE:
+		value = dev_priv->gart_textures_offset;
+		break;
+	case RADEON_PARAM_SCRATCH_OFFSET:
+		if (!dev_priv->writeback_works)
+			return -EINVAL;
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			value = R600_SCRATCH_REG_OFFSET;
+		else
+			value = RADEON_SCRATCH_REG_OFFSET;
+		break;
+	case RADEON_PARAM_CARD_TYPE:
+		if (dev_priv->flags & RADEON_IS_PCIE)
+			value = RADEON_CARD_PCIE;
+		else if (dev_priv->flags & RADEON_IS_AGP)
+			value = RADEON_CARD_AGP;
+		else
+			value = RADEON_CARD_PCI;
+		break;
+	case RADEON_PARAM_VBLANK_CRTC:
+		value = radeon_vblank_crtc_get(dev);
+		break;
+	case RADEON_PARAM_FB_LOCATION:
+		value = radeon_read_fb_location(dev_priv);
+		break;
+	case RADEON_PARAM_NUM_GB_PIPES:
+		value = dev_priv->num_gb_pipes;
+		break;
+	case RADEON_PARAM_NUM_Z_PIPES:
+		value = dev_priv->num_z_pipes;
+		break;
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_setparam_t *sp = data;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	switch (sp->param) {
+	case RADEON_SETPARAM_FB_LOCATION:
+		radeon_priv = file_priv->driver_priv;
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location -
+		    sp->value;
+		break;
+	case RADEON_SETPARAM_SWITCH_TILING:
+		if (sp->value == 0) {
+			DRM_DEBUG("color tiling disabled\n");
+			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->tiling_enabled = 0;
+		} else if (sp->value == 1) {
+			DRM_DEBUG("color tiling enabled\n");
+			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->tiling_enabled = 1;
+		}
+		break;
+	case RADEON_SETPARAM_PCIGART_LOCATION:
+		dev_priv->pcigart_offset = sp->value;
+		dev_priv->pcigart_offset_set = 1;
+		break;
+	case RADEON_SETPARAM_NEW_MEMMAP:
+		dev_priv->new_memmap = sp->value;
+		break;
+	case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
+		dev_priv->gart_info.table_size = sp->value;
+		if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
+			dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+		break;
+	case RADEON_SETPARAM_VBLANK_CRTC:
+		return radeon_vblank_crtc_set(dev, sp->value);
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", sp->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ *    - Free any alloced GART memory.
+ *    - Free any alloced radeon surfaces.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
+ */
+void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_radeon_private_t *dev_priv = dev->dev_private;
+		dev_priv->page_flipping = 0;
+		radeon_mem_release(file_priv, dev_priv->gart_heap);
+		radeon_mem_release(file_priv, dev_priv->fb_heap);
+		radeon_surfaces_release(file_priv, dev_priv);
+	}
+}
+
+void radeon_driver_lastclose(struct drm_device *dev)
+{
+	radeon_surfaces_release(PCIGART_FILE_PRIV, dev->dev_private);
+	radeon_do_release(dev);
+}
+
+int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	DRM_DEBUG("\n");
+	radeon_priv = malloc(sizeof(*radeon_priv), DRM_MEM_DRIVER, M_NOWAIT);
+
+	if (!radeon_priv)
+		return -ENOMEM;
+
+	file_priv->driver_priv = radeon_priv;
+
+	if (dev_priv)
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
+	else
+		radeon_priv->radeon_fb_delta = 0;
+	return 0;
+}
+
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_radeon_driver_file_fields *radeon_priv =
+	    file_priv->driver_priv;
+
+	free(radeon_priv, DRM_MEM_DRIVER);
+}
+
+struct drm_ioctl_desc radeon_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+};
+
+int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);


Property changes on: trunk/sys/dev/drm2/radeon/radeon_state.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_test.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_test.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_test.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,510 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Michel Dänzer
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_test.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
+
+/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
+{
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
+	struct radeon_fence *fence = NULL;
+	uint64_t gtt_addr, vram_addr;
+	unsigned i, n, size;
+	int r, ring;
+
+	switch (flag) {
+	case RADEON_TEST_COPY_DMA:
+		ring = radeon_copy_dma_ring_index(rdev);
+		break;
+	case RADEON_TEST_COPY_BLIT:
+		ring = radeon_copy_blit_ring_index(rdev);
+		break;
+	default:
+		DRM_ERROR("Unknown copy method\n");
+		return;
+	}
+
+	size = 1024 * 1024;
+
+	/* Number of tests =
+	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
+	 */
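+	/* Rough illustration (numbers hypothetical): a 256MB GTT minus
+	 * the IB pool, ring buffers and writeback page leaves a bit
+	 * under 256MB, so with the 1MB test size below n comes out at
+	 * roughly 250 iterations.
+	 */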
+	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		n -= rdev->ring[i].ring_size;
+	if (rdev->wb.wb_obj)
+		n -= RADEON_GPU_PAGE_SIZE;
+	if (rdev->ih.ring_obj)
+		n -= rdev->ih.ring_size;
+	n /= size;
+
+	gtt_obj = malloc(n * sizeof(*gtt_obj), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (!gtt_obj) {
+		DRM_ERROR("Failed to allocate %d pointers\n", n);
+		r = 1;
+		goto out_cleanup;
+	}
+
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+			     NULL, &vram_obj);
+	if (r) {
+		DRM_ERROR("Failed to create VRAM object\n");
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	if (r) {
+		DRM_ERROR("Failed to pin VRAM object\n");
+		goto out_cleanup;
+	}
+	for (i = 0; i < n; i++) {
+		void *gtt_map, *vram_map;
+		void **gtt_start, **gtt_end;
+		void **vram_start, **vram_end;
+
+		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+		if (r) {
+			DRM_ERROR("Failed to create GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		if (r) {
+			DRM_ERROR("Failed to pin GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size);
+		     gtt_start < gtt_end;
+		     gtt_start++)
+			*gtt_start = gtt_start;
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (r) {
+			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
+			goto out_cleanup;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(vram_obj, &vram_map);
+		if (r) {
+			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
+		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
+		     vram_start < vram_end;
+		     gtt_start++, vram_start++) {
+			if (*vram_start != gtt_start) {
+				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
+					  "expected 0x%p (GTT/VRAM offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *vram_start, gtt_start,
+					  (unsigned long long)
+					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
+					   (uintptr_t)gtt_start - (uintptr_t)gtt_map),
+					  (unsigned long long)
+					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
+					   (uintptr_t)gtt_start - (uintptr_t)gtt_map));
+				radeon_bo_kunmap(vram_obj);
+				goto out_cleanup;
+			}
+			*vram_start = vram_start;
+		}
+
+		radeon_bo_kunmap(vram_obj);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (r) {
+			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
+			goto out_cleanup;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
+		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
+		     gtt_start < gtt_end;
+		     gtt_start++, vram_start++) {
+			if (*gtt_start != vram_start) {
+				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
+					  "expected 0x%p (VRAM/GTT offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *gtt_start, vram_start,
+					  (unsigned long long)
+					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
+					   (uintptr_t)vram_start - (uintptr_t)vram_map),
+					  (unsigned long long)
+					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
+					   (uintptr_t)vram_start - (uintptr_t)vram_map));
+				radeon_bo_kunmap(gtt_obj[i]);
+				goto out_cleanup;
+			}
+		}
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%jx\n",
+			 (uintmax_t)gtt_addr - rdev->mc.gtt_start);
+	}
+
+out_cleanup:
+	if (vram_obj) {
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
+	}
+	if (gtt_obj) {
+		for (i = 0; i < n; i++) {
+			if (gtt_obj[i]) {
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
+			}
+		}
+		free(gtt_obj, DRM_MEM_DRIVER);
+	}
+	if (fence) {
+		radeon_fence_unref(&fence);
+	}
+	if (r) {
+		DRM_ERROR("Error while testing BO move.\n");
+	}
+}
+
+void radeon_test_moves(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.dma)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+	if (rdev->asic->copy.blit)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
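+/* Basic two-ring semaphore test: ring A queues a semaphore wait ahead
+ * of each of two fences, then ring B signals the semaphore.  Each fence
+ * may only signal after the corresponding signal from ring B.
+ */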
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *ringA,
+			   struct radeon_ring *ringB)
+{
+	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	int r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fence1, ringA->idx);
+	if (r) {
+		DRM_ERROR("Failed to emit fence 1\n");
+		radeon_ring_unlock_undo(rdev, ringA);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fence2, ringA->idx);
+	if (r) {
+		DRM_ERROR("Failed to emit fence 2\n");
+		radeon_ring_unlock_undo(rdev, ringA);
+		goto out_cleanup;
+	}
+	radeon_ring_unlock_commit(rdev, ringA);
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence1)) {
+		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB);
+
+	r = radeon_fence_wait(fence1, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence2)) {
+		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB);
+
+	r = radeon_fence_wait(fence2, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fence1)
+		radeon_fence_unref(&fence1);
+
+	if (fence2)
+		radeon_fence_unref(&fence2);
+
+	if (r)
+		DRM_ERROR("Error while testing ring sync (%d).\n", r);
+}
+
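+/* Three-ring variant: rings A and B each block on the same semaphore
+ * and ring C signals it twice; exactly one of the two fences must
+ * complete per signal, in either order.
+ */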
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
+			    struct radeon_ring *ringA,
+			    struct radeon_ring *ringB,
+			    struct radeon_ring *ringC)
+{
+	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	bool sigA, sigB;
+	int i, r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
+	if (r) {
+		DRM_ERROR("Failed to emit sync fence 1\n");
+		radeon_ring_unlock_undo(rdev, ringA);
+		goto out_cleanup;
+	}
+	radeon_ring_unlock_commit(rdev, ringA);
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
+	if (r) {
+		DRM_ERROR("Failed to create sync fence 2\n");
+		radeon_ring_unlock_undo(rdev, ringB);
+		goto out_cleanup;
+	}
+	radeon_ring_unlock_commit(rdev, ringB);
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fenceA)) {
+		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+	if (radeon_fence_signaled(fenceB)) {
+		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC);
+
+	for (i = 0; i < 30; ++i) {
+		mdelay(100);
+		sigA = radeon_fence_signaled(fenceA);
+		sigB = radeon_fence_signaled(fenceB);
+		if (sigA || sigB)
+			break;
+	}
+
+	if (!sigA && !sigB) {
+		DRM_ERROR("Neither fence A nor B has been signaled\n");
+		goto out_cleanup;
+	} else if (sigA && sigB) {
+		DRM_ERROR("Both fence A and B has been signaled\n");
+		goto out_cleanup;
+	}
+
+	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC);
+
+	mdelay(1000);
+
+	r = radeon_fence_wait(fenceA, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence A\n");
+		goto out_cleanup;
+	}
+	r = radeon_fence_wait(fenceB, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence B\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fenceA)
+		radeon_fence_unref(&fenceA);
+
+	if (fenceB)
+		radeon_fence_unref(&fenceB);
+
+	if (r)
+		DRM_ERROR("Error while testing ring sync (%d).\n", r);
+}
+
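+/*
+ * Exercise every ordered pair of ready rings with the two-ring test,
+ * and every ordered triple with the three-ring test.
+ */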
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+	int i, j, k;
+
+	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ringA = &rdev->ring[i];
+		if (!ringA->ready)
+			continue;
+
+		for (j = 0; j < i; ++j) {
+			struct radeon_ring *ringB = &rdev->ring[j];
+			if (!ringB->ready)
+				continue;
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+			radeon_test_ring_sync(rdev, ringA, ringB);
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+			radeon_test_ring_sync(rdev, ringB, ringA);
+
+			for (k = 0; k < j; ++k) {
+				struct radeon_ring *ringC = &rdev->ring[k];
+				if (!ringC->ready)
+					continue;
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+			}
+		}
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_test.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_trace.h
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_trace.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_trace.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,86 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_trace.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+	    TP_PROTO(struct radeon_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct radeon_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>


Property changes on: trunk/sys/dev/drm2/radeon/radeon_trace.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_trace_points.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_trace_points.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_trace_points.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,13 @@
+/* $MidnightBSD$ */
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied at redhat.com>
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_trace_points.c 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"


Property changes on: trunk/sys/dev/drm2/radeon/radeon_trace_points.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/radeon_ttm.c
===================================================================
--- trunk/sys/dev/drm2/radeon/radeon_ttm.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/radeon_ttm.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,910 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse at freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/radeon_ttm.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/ttm/ttm_bo_api.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+	struct radeon_mman *mman;
+	struct radeon_device *rdev;
+
+	mman = container_of(bdev, struct radeon_mman, bdev);
+	rdev = container_of(mman, struct radeon_device, mman);
+	return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	rdev->mman.mem_global_referenced = false;
+	global_ref = &rdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &radeon_ttm_mem_global_init;
+	global_ref->release = &radeon_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	rdev->mman.bo_global_ref.mem_glob =
+		rdev->mman.mem_global_ref.object;
+	global_ref = &rdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		return r;
+	}
+
+	rdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+	if (rdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		rdev->mman.mem_global_referenced = false;
+	}
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
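+/*
+ * Describe the TTM memory domains: cacheable system memory, GTT
+ * (GPU-mapped system pages, or the AGP aperture when AGP is active)
+ * and write-combined on-card VRAM.
+ */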
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				struct ttm_mem_type_manager *man)
+{
+	struct radeon_device *rdev;
+
+	rdev = radeon_get_rdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.gtt_start;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+				DRM_ERROR("AGP is not enabled for memory type %u\n",
+					  (unsigned)type);
+				return -EINVAL;
+			}
+			if (!rdev->ddev->agp->cant_use_aperture)
+				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+						 TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.vram_start;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct radeon_bo *rbo;
+	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+		placement->fpfn = 0;
+		placement->lpfn = 0;
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+		else
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
+	default:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+	}
+	*placement = rbo->placement;
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	KASSERT(old_mem->mm_node == NULL, ("old_mem->mm_node != NULL"));
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
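+/*
+ * Move a buffer with the GPU copy engine: translate both placements
+ * into GPU addresses and emit a fenced copy on the copy ring.
+ */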
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+			bool evict, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem,
+			struct ttm_mem_reg *old_mem)
+{
+	struct radeon_device *rdev;
+	uint64_t old_start, new_start;
+	struct radeon_fence *fence;
+	int r, ridx;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	ridx = radeon_copy_ring_index(rdev);
+	old_start = old_mem->start << PAGE_SHIFT;
+	new_start = new_mem->start << PAGE_SHIFT;
+
+	switch (old_mem->mem_type) {
+	case TTM_PL_VRAM:
+		old_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		old_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		new_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		new_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	if (!rdev->ring[ridx].ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
+		return -EINVAL;
+	}
+
+	CTASSERT((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0);
+
+	/* sync other rings */
+	fence = bo->sync_obj;
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			&fence);
+	/* FIXME: handle copy error */
+	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+				      evict, no_wait_gpu, new_mem);
+	radeon_fence_unref(&fence);
+	return r;
+}
+
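+/*
+ * VRAM -> system moves are staged through a temporary GTT placement:
+ * blit into GTT first, then let TTM move the bound pages to system
+ * memory.  radeon_move_ram_vram() below is the mirror image.
+ */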
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	u32 placements;
+	struct ttm_placement placement;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+
+	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+
+	r = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_placement placement;
+	u32 placements;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
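+/*
+ * Top-level move hook: handle trivial moves (no backing store, plain
+ * GTT<->system rebinds) without copying, prefer a GPU blit when a
+ * copy ring is ready, and fall back to memcpy otherwise.
+ */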
+static int radeon_bo_move(struct ttm_buffer_object *bo,
+			bool evict, bool interruptible,
+			bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if ((old_mem->mem_type == TTM_PL_TT &&
+	     new_mem->mem_type == TTM_PL_SYSTEM) ||
+	    (old_mem->mem_type == TTM_PL_SYSTEM &&
+	     new_mem->mem_type == TTM_PL_TT)) {
+		/* bind is enough */
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
+	    rdev->asic->copy.copy == NULL) {
+		/* use memcpy */
+		goto memcpy;
+	}
+
+	if (old_mem->mem_type == TTM_PL_VRAM &&
+	    new_mem->mem_type == TTM_PL_SYSTEM) {
+		r = radeon_move_vram_ram(bo, evict, interruptible,
+					no_wait_gpu, new_mem);
+	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+		   new_mem->mem_type == TTM_PL_VRAM) {
+		r = radeon_move_ram_vram(bo, evict, interruptible,
+					    no_wait_gpu, new_mem);
+	} else {
+		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+	}
+
+	if (r) {
+memcpy:
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	}
+	return r;
+}
+
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+#ifdef __alpha__
+		/*
+		 * Alpha: use bus.addr to hold the ioremap() return,
+		 * so we can modify bus.base below.
+		 */
+		if (mem->placement & TTM_PL_FLAG_WC)
+			mem->bus.addr =
+				ioremap_wc(mem->bus.base + mem->bus.offset,
+					   mem->bus.size);
+		else
+			mem->bus.addr =
+				ioremap_nocache(mem->bus.base + mem->bus.offset,
+						mem->bus.size);
+
+		/*
+		 * Alpha: Use just the bus offset plus
+		 * the hose/domain memory base for bus.base.
+		 * It then can be used to build PTEs for VRAM
+		 * access, as done in ttm_bo_vm_fault().
+		 */
+		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+			rdev->ddev->hose->dense_mem_base;
+#endif
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
+{
+	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+
+static int radeon_sync_obj_flush(void *sync_obj)
+{
+	return 0;
+}
+
+static void radeon_sync_obj_unref(void **sync_obj)
+{
+	radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
+
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+	return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
+
+static bool radeon_sync_obj_signaled(void *sync_obj)
+{
+	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+}
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct radeon_device		*rdev;
+	u64				offset;
+};
+
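+/*
+ * Bind/unbind install and remove the ttm_tt's pages in the GART at
+ * the offset chosen by the placement, using the DMA addresses
+ * collected at populate time.
+ */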
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	int r;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		DRM_ERROR("nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	r = radeon_gart_bind(gtt->rdev, gtt->offset,
+			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+	return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	free(gtt, DRM_MEM_DRIVER);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+	.bind = &radeon_ttm_backend_bind,
+	.unbind = &radeon_ttm_backend_unbind,
+	.destroy = &radeon_ttm_backend_destroy,
+};
+
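+/*
+ * Allocate the per-object backend: AGP systems use the generic AGP
+ * ttm_tt, otherwise a radeon_ttm_tt wrapping a ttm_dma_tt so per-page
+ * DMA addresses are available for GART binding.
+ */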
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size, uint32_t page_flags,
+				    vm_page_t dummy_read_page)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt;
+
+	rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+					 size, page_flags, dummy_read_page);
+	}
+#endif
+
+	gtt = malloc(sizeof(struct radeon_ttm_tt),
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &radeon_backend_func;
+	gtt->rdev = rdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		free(gtt, DRM_MEM_DRIVER);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	int r;
+#ifdef FREEBSD_WIP
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+#endif /* FREEBSD_WIP */
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+#ifdef FREEBSD_WIP
+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+#endif /* FREEBSD_WIP */
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_populate(ttm);
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(&gtt->ttm, rdev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
+#ifdef FREEBSD_WIP
+		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+						       0, PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+			while (--i) {
+				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				gtt->ttm.dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+#endif /* FREEBSD_WIP */
+	}
+	return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (gtt->ttm.dma_address[i]) {
+			gtt->ttm.dma_address[i] = 0;
+#ifdef FREEBSD_WIP
+			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+#endif /* FREEBSD_WIP */
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+	.ttm_tt_create = &radeon_ttm_tt_create,
+	.ttm_tt_populate = &radeon_ttm_tt_populate,
+	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+	.invalidate_caches = &radeon_invalidate_caches,
+	.init_mem_type = &radeon_init_mem_type,
+	.evict_flags = &radeon_evict_flags,
+	.move = &radeon_bo_move,
+	.verify_access = &radeon_verify_access,
+	.sync_obj_signaled = &radeon_sync_obj_signaled,
+	.sync_obj_wait = &radeon_sync_obj_wait,
+	.sync_obj_flush = &radeon_sync_obj_flush,
+	.sync_obj_unref = &radeon_sync_obj_unref,
+	.sync_obj_ref = &radeon_sync_obj_ref,
+	.move_notify = &radeon_bo_move_notify,
+	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
+};
+
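+/*
+ * Bring up TTM for the device: global state, the VRAM and GTT heaps
+ * sized from the memory controller, a small pinned buffer covering
+ * the stolen VGA memory, and the debugfs files.
+ */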
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+	int r;
+
+	r = radeon_ttm_global_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* No other users of the address space, so set it to 0. */
+	r = ttm_bo_device_init(&rdev->mman.bdev,
+			       rdev->mman.bo_global_ref.ref.object,
+			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+			       rdev->need_dma32);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	rdev->mman.initialized = true;
+	rdev->ddev->drm_ttm_bdev = &rdev->mman.bdev;
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+				rdev->mc.real_vram_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM,
+			     NULL, &rdev->stollen_vga_memory);
+	if (r) {
+		return r;
+	}
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r) {
+		radeon_bo_unref(&rdev->stollen_vga_memory);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
+	if (r) {
+		radeon_bo_unref(&rdev->stollen_vga_memory);
+		return r;
+	}
+	DRM_INFO("radeon: %uM of VRAM memory ready\n",
+		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+				rdev->mc.gtt_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing GTT heap.\n");
+		return r;
+	}
+	DRM_INFO("radeon: %uM of GTT memory ready.\n",
+		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
+
+	r = radeon_ttm_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->mman.initialized)
+		return;
+	if (rdev->stollen_vga_memory) {
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
+	}
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+	ttm_bo_device_release(&rdev->mman.bdev);
+	radeon_gart_fini(rdev);
+	radeon_ttm_global_fini(rdev);
+	rdev->mman.initialized = false;
+	DRM_INFO("radeon: ttm finalized\n");
+}
+
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+	struct ttm_mem_type_manager *man;
+
+	if (!rdev->mman.initialized)
+		return;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
+	man->size = size >> PAGE_SHIFT;
+}
+
+#ifdef FREEBSD_WIP
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct radeon_device *rdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL) {
+		return VM_FAULT_NOPAGE;
+	}
+	rdev = radeon_get_rdev(bo->bdev);
+	sx_slock(&rdev->pm.mclk_lock);
+	r = ttm_vm_ops->fault(vma, vmf);
+	sx_sunlock(&rdev->pm.mclk_lock);
+	return r;
+}
+
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct radeon_device *rdev;
+	int r;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		return drm_mmap(filp, vma);
+	}
+
+	file_priv = filp->private_data;
+	rdev = file_priv->minor->dev->dev_private;
+	if (rdev == NULL) {
+		return -EINVAL;
+	}
+	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		radeon_ttm_vm_ops = *ttm_vm_ops;
+		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+	}
+	vma->vm_ops = &radeon_ttm_vm_ops;
+	return 0;
+}
+#endif /* FREEBSD_WIP */
+
+
+#define RADEON_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret;
+	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	ret = drm_mm_dump_table(m, mm);
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+#endif
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+	unsigned i;
+
+	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+		if (i == 0)
+			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+		else
+			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+		radeon_mem_types_list[i].driver_features = 0;
+		if (i == 0)
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+		else
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+
+	}
+	/* Add ttm page pool to debugfs */
+	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+	radeon_mem_types_list[i].driver_features = 0;
+	radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+		radeon_mem_types_list[i].driver_features = 0;
+		radeon_mem_types_list[i++].data = NULL;
+	}
+#endif
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+
+#endif
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/radeon_ttm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/reg_srcs/cayman
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/cayman	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/cayman	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,642 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/cayman
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/reg_srcs/evergreen
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/evergreen	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/evergreen	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,644 @@
+evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/evergreen
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
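
To make the intent of these lists concrete, here is a minimal lookup sketch. It is not code from this commit: the function and array names are invented, and the table carries only five of the evergreen entries above, where the driver's real table would carry them all.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A handful of offsets from the evergreen list above, kept sorted. */
static const uint32_t evergreen_safe_regs[] = {
	0x0000802C,	/* GRBM_GFX_INDEX */
	0x00008040,	/* WAIT_UNTIL */
	0x000085F0,	/* CP_COHER_CNTL */
	0x00028000,	/* DB_RENDER_CONTROL */
	0x0003FF08,	/* SQ_LOOP_BOOL_CLEAR */
};

/*
 * Binary search over the sorted offset table: true if the register
 * may be written directly from a userspace command stream.
 */
static bool
reg_is_safe(uint32_t reg)
{
	size_t lo = 0;
	size_t hi = sizeof(evergreen_safe_regs) / sizeof(evergreen_safe_regs[0]);

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (evergreen_safe_regs[mid] == reg)
			return (true);
		if (evergreen_safe_regs[mid] < reg)
			lo = mid + 1;
		else
			hi = mid;
	}
	return (false);
}

The in-tree checkers use a generated bitmap rather than a search; a sketch of that generation step follows the r300 list below.
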
Added: trunk/sys/dev/drm2/radeon/reg_srcs/r100
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/r100	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/r100	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,105 @@
+r100 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1810 FOG_3D_TABLE_START
+0x1814 FOG_3D_TABLE_END
+0x1a14 FOG_TABLE_INDEX
+0x1a18 FOG_TABLE_DATA
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 SE_COORD_FMT
+0x1c60 PP_TXCBLEND_0
+0x1c64 PP_TXABLEND_0
+0x1c68 PP_TFACTOR_0
+0x1c78 PP_TXCBLEND_1
+0x1c7c PP_TXABLEND_1
+0x1c80 PP_TFACTOR_1
+0x1c90 PP_TXCBLEND_2
+0x1c94 PP_TXABLEND_2
+0x1c98 PP_TFACTOR_2
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1d40 PP_BORDER_COLOR0
+0x1d44 PP_BORDER_COLOR1
+0x1d48 PP_BORDER_COLOR2
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9C VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2140 SE_CNTL_STATUS
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
+0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
+0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
+0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
+0x2220 SE_TCL_MATERIAL_AMBIENT_RED
+0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
+0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
+0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
+0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
+0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
+0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
+0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
+0x2240 SE_TCL_MATERIAL_SPECULAR_RED
+0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
+0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
+0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
+0x2250 SE_TCL_SHININESS
+0x2254 SE_TCL_OUTPUT_VTX_FMT
+0x2258 SE_TCL_OUTPUT_VTX_SEL
+0x225c SE_TCL_MATRIX_SELECT_0
+0x2260 SE_TCL_MATRIX_SELECT_1
+0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
+0x2268 SE_TCL_TEXTURE_PROC_CTL
+0x226c SE_TCL_LIGHT_MODEL_CTL
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 SE_TCL_STATE_FLUSH
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x3290 RB3D_ZPASS_DATA


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/r100
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/reg_srcs/r200
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/r200	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/r200	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,186 @@
+r200 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 RE_CNTL
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1cd8 RE_SCISSOR_TL_0
+0x1cdc RE_SCISSOR_BR_0
+0x1ce0 RE_SCISSOR_TL_1
+0x1ce4 RE_SCISSOR_BR_1
+0x1ce8 RE_SCISSOR_TL_2
+0x1cec RE_SCISSOR_BR_2
+0x1d60 RB3D_DEPTHXY_OFFSET
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2080 SE_VAP_CNTL
+0x2090 SE_TCL_OUTPUT_VTX_FMT_0
+0x2094 SE_TCL_OUTPUT_VTX_FMT_1
+0x20b0 SE_VTE_CNTL
+0x2140 SE_CNTL_STATUS
+0x2180 SE_VTX_STATE_CNTL
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2230 SE_TCL_MATRIX_SEL_0
+0x2234 SE_TCL_MATRIX_SEL_1
+0x2238 SE_TCL_MATRIX_SEL_2
+0x223c SE_TCL_MATRIX_SEL_3
+0x2240 SE_TCL_MATRIX_SEL_4
+0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
+0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
+0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
+0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
+0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
+0x2268 SE_TCL_LIGHT_MODEL_CTL_0
+0x226c SE_TCL_LIGHT_MODEL_CTL_1
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x22a8 SE_TCL_TEX_PROC_CTL_2
+0x22ac SE_TCL_TEX_PROC_CTL_3
+0x22b0 SE_TCL_TEX_PROC_CTL_0
+0x22b4 SE_TCL_TEX_PROC_CTL_1
+0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
+0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
+0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
+0x2648 RE_POINTSIZE
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x26f0 RE_AUX_SCISSOR_CNTL
+0x2c14 PP_BORDER_COLOR_0
+0x2c34 PP_BORDER_COLOR_1
+0x2c54 PP_BORDER_COLOR_2
+0x2c74 PP_BORDER_COLOR_3
+0x2c94 PP_BORDER_COLOR_4
+0x2cb4 PP_BORDER_COLOR_5
+0x2cc4 PP_CNTL_X
+0x2cf8 PP_TRI_PERF
+0x2cfc PP_PERF_CNTL
+0x2d9c PP_TAM_DEBUG3
+0x2ee0 PP_TFACTOR_0
+0x2ee4 PP_TFACTOR_1
+0x2ee8 PP_TFACTOR_2
+0x2eec PP_TFACTOR_3
+0x2ef0 PP_TFACTOR_4
+0x2ef4 PP_TFACTOR_5
+0x2ef8 PP_TFACTOR_6
+0x2efc PP_TFACTOR_7
+0x2f00 PP_TXCBLEND_0
+0x2f04 PP_TXCBLEND2_0
+0x2f08 PP_TXABLEND_0
+0x2f0c PP_TXABLEND2_0
+0x2f10 PP_TXCBLEND_1
+0x2f14 PP_TXCBLEND2_1
+0x2f18 PP_TXABLEND_1
+0x2f1c PP_TXABLEND2_1
+0x2f20 PP_TXCBLEND_2
+0x2f24 PP_TXCBLEND2_2
+0x2f28 PP_TXABLEND_2
+0x2f2c PP_TXABLEND2_2
+0x2f30 PP_TXCBLEND_3
+0x2f34 PP_TXCBLEND2_3
+0x2f38 PP_TXABLEND_3
+0x2f3c PP_TXABLEND2_3
+0x2f40 PP_TXCBLEND_4
+0x2f44 PP_TXCBLEND2_4
+0x2f48 PP_TXABLEND_4
+0x2f4c PP_TXABLEND2_4
+0x2f50 PP_TXCBLEND_5
+0x2f54 PP_TXCBLEND2_5
+0x2f58 PP_TXABLEND_5
+0x2f5c PP_TXABLEND2_5
+0x2f60 PP_TXCBLEND_6
+0x2f64 PP_TXCBLEND2_6
+0x2f68 PP_TXABLEND_6
+0x2f6c PP_TXABLEND2_6
+0x2f70 PP_TXCBLEND_7
+0x2f74 PP_TXCBLEND2_7
+0x2f78 PP_TXABLEND_7
+0x2f7c PP_TXABLEND2_7
+0x2f80 PP_TXCBLEND_8
+0x2f84 PP_TXCBLEND2_8
+0x2f88 PP_TXABLEND_8
+0x2f8c PP_TXABLEND2_8
+0x2f90 PP_TXCBLEND_9
+0x2f94 PP_TXCBLEND2_9
+0x2f98 PP_TXABLEND_9
+0x2f9c PP_TXABLEND2_9
+0x2fa0 PP_TXCBLEND_10
+0x2fa4 PP_TXCBLEND2_10
+0x2fa8 PP_TXABLEND_10
+0x2fac PP_TXABLEND2_10
+0x2fb0 PP_TXCBLEND_11
+0x2fb4 PP_TXCBLEND2_11
+0x2fb8 PP_TXABLEND_11
+0x2fbc PP_TXABLEND2_11
+0x2fc0 PP_TXCBLEND_12
+0x2fc4 PP_TXCBLEND2_12
+0x2fc8 PP_TXABLEND_12
+0x2fcc PP_TXABLEND2_12
+0x2fd0 PP_TXCBLEND_13
+0x2fd4 PP_TXCBLEND2_13
+0x2fd8 PP_TXABLEND_13
+0x2fdc PP_TXABLEND2_13
+0x2fe0 PP_TXCBLEND_14
+0x2fe4 PP_TXCBLEND2_14
+0x2fe8 PP_TXABLEND_14
+0x2fec PP_TXABLEND2_14
+0x2ff0 PP_TXCBLEND_15
+0x2ff4 PP_TXCBLEND2_15
+0x2ff8 PP_TXABLEND_15
+0x2ffc PP_TXABLEND2_15
+0x3218 RB3D_BLENCOLOR
+0x321c RB3D_ABLENDCNTL
+0x3220 RB3D_CBLENDCNTL
+0x3290 RB3D_ZPASS_DATA
+


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/r200
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/reg_srcs/r300
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/r300	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/r300	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,714 @@
+r300 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/r300
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
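
As noted above, these lists are normally compiled into bitmaps rather than searched at run time. The sketch below does that transformation in the spirit of the Linux mkregtable helper; every identifier and the output layout are invented for illustration, and the real tool does considerably more validation.

#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	char chip[32], name[128];
	unsigned int bound, reg, nwords, i;
	unsigned int *bm;
	FILE *fp;

	if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL) {
		fprintf(stderr, "usage: %s reg_srcs-file\n", argv[0]);
		return (1);
	}
	/* Header line, e.g. "r300 0x4f60": chip name plus a hex bound. */
	if (fscanf(fp, "%31s %x", chip, &bound) != 2)
		return (1);
	nwords = ((bound >> 2) + 31) / 32;	/* one bit per dword register */
	if ((bm = calloc(nwords, sizeof(*bm))) == NULL)
		return (1);
	/* Every other line is "<hex offset> <register name>". */
	while (fscanf(fp, "%x %127s", &reg, name) == 2)
		if ((reg >> 2) / 32 < nwords)
			bm[(reg >> 2) / 32] |= 1u << ((reg >> 2) % 32);
	printf("static const unsigned %s_reg_bm[%u] = {\n", chip, nwords);
	for (i = 0; i < nwords; i++)
		printf("\t0x%08x,\n", bm[i]);
	printf("};\n");
	fclose(fp);
	free(bm);
	return (0);
}

Fed the r300 list above ("r300 0x4f60"), this emits a 159-word bitmap; a checker can then test bit (offset >> 2) in constant time instead of searching the list.
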
Added: trunk/sys/dev/drm2/radeon/reg_srcs/r420
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/r420	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/r420	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,780 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/r420
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
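
All four reg_srcs tables in this commit (r420 above; r600, rn50 and rs600
below) share one plain-text format: a header line "<chip> <hex value>"
followed by one "<hex offset> <REGISTER_NAME>" entry per 32-bit register
that the command-stream checker recognizes for that chip family (the
leading "+" on each line is diff markup, not file content).  At build
time the radeon sources run these lists through a generator (mkregtable)
to produce bitmask headers for the checker.  As a minimal stand-alone
sketch under those format assumptions (not the in-tree tool), a
validator could look like:

#include <stdio.h>

int
main(int argc, char **argv)
{
        char chip[64], name[128], line[256];
        unsigned int header, off, nregs = 0;
        FILE *fp;

        if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL) {
                fprintf(stderr, "usage: %s reg_srcs-file\n", argv[0]);
                return (1);
        }
        /* Header line, e.g. "r600 0x9400". */
        if (fgets(line, sizeof(line), fp) == NULL ||
            sscanf(line, "%63s %x", chip, &header) != 2) {
                fprintf(stderr, "missing \"<chip> <hex>\" header\n");
                return (1);
        }
        while (fgets(line, sizeof(line), fp) != NULL) {
                if (line[0] == '\n' || line[0] == '\0')
                        continue;       /* tolerate a trailing blank line */
                /* Entry line, e.g. "0x4580 TX_CHROMA_KEY_0". */
                if (sscanf(line, "%x %127s", &off, name) != 2) {
                        fprintf(stderr, "unparsable entry: %s", line);
                        return (1);
                }
                if (off & 3) {
                        fprintf(stderr, "unaligned register %s\n", name);
                        return (1);
                }
                nregs++;
        }
        fclose(fp);
        printf("%s: %u registers, header 0x%x\n", chip, nregs, header);
        return (0);
}

Run against one of the extracted files (e.g. "./regcheck r600"): every
offset must parse and be dword-aligned, which all of the entries in
these tables are.
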
Added: trunk/sys/dev/drm2/radeon/reg_srcs/r600
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/r600	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/r600	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,755 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008490 CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028354 SX_SURFACE_SYNC
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009604 TC_INVALIDATE
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009700 VC_CNTL


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/r600
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
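
However the generated table is ultimately consumed, one bit per dword
register is the natural in-memory form, since it makes the checker's
per-register test O(1).  A hypothetical helper in that style (the
REG_BM_* names are illustrative, not taken from the driver; 0x6d40 and
0x4580 are the rs600 header value and TX_CHROMA_KEY_0 offset from the
lists in this commit):

#include <stdbool.h>
#include <stdint.h>

/* Words needed for one bit per 32-bit register up to max_off bytes. */
#define REG_BM_WORDS(max_off)   (((max_off) / 4 + 31) / 32)

static bool
reg_bm_test(const uint32_t *bm, uint32_t off)
{
        uint32_t i = off / 4;           /* dword index of the register */

        return ((bm[i / 32] >> (i % 32)) & 1) != 0;
}

static void
reg_bm_set(uint32_t *bm, uint32_t off)
{
        uint32_t i = off / 4;

        bm[i / 32] |= 1u << (i % 32);
}

int
main(void)
{
        uint32_t bm[REG_BM_WORDS(0x6d40)] = { 0 };

        reg_bm_set(bm, 0x4580);         /* TX_CHROMA_KEY_0 from the list */
        return (reg_bm_test(bm, 0x4580) ? 0 : 1);
}
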
Added: trunk/sys/dev/drm2/radeon/reg_srcs/rn50
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/rn50	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/rn50	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,30 @@
+rn50 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/rn50
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/reg_srcs/rs600
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/rs600	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/rs600	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,780 @@
+rs600 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/rs600
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
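
The reg_srcs tables in this commit share one format: a header line naming
the ASIC and the highest register offset covered (e.g. "rv515 0x6d40"
below), then one "offset name" pair per register that a user command
stream is allowed to write. In the Linux radeon tree a small generator
(mkregtable) folds such lists into per-ASIC bitmaps like the
rn50_reg_safe.h header further down; this drm2 import appears to ship the
pre-generated headers. A minimal sketch of that folding, assuming the
mkregtable convention (start all-ones, one bit per 32-bit register, a set
bit meaning "not automatically safe"); names and sizes here are
illustrative, not the actual generator's:

#include <stdint.h>
#include <string.h>

#define MAX_OFFSET 0x6d40                  /* from the table header line */
#define BM_WORDS   (((MAX_OFFSET / 4) + 31) / 32)

static uint32_t reg_safe_bm[BM_WORDS];

static void mark_safe(uint32_t offset)
{
	uint32_t idx = offset / 4;                     /* register index */
	reg_safe_bm[idx / 32] &= ~(1u << (idx % 32));  /* clear bit = safe */
}

static void build_bitmap(void)
{
	memset(reg_safe_bm, 0xff, sizeof(reg_safe_bm)); /* all forbidden */
	mark_safe(0x1434);                              /* SRC_Y_X */
	mark_safe(0x4F58);                              /* ZB_ZPASS_DATA */
}
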
Added: trunk/sys/dev/drm2/radeon/reg_srcs/rv515
===================================================================
--- trunk/sys/dev/drm2/radeon/reg_srcs/rv515	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/reg_srcs/rv515	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,496 @@
+rv515 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x2218 VAP_TEX_TO_COLOR_CNTL
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
+0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
+0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
+0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
+0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
+0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
+0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
+0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
+0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
+0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
+0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
+0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
+0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
+0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
+0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
+0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
+0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
+0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
+0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
+0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
+0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
+0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
+0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
+0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
+0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
+0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
+0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
+0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
+0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
+0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
+0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
+0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4074 RS_IP_0
+0x4078 RS_IP_1
+0x407C RS_IP_2
+0x4080 RS_IP_3
+0x4084 RS_IP_4
+0x4088 RS_IP_5
+0x408C RS_IP_6
+0x4090 RS_IP_7
+0x4094 RS_IP_8
+0x4098 RS_IP_9
+0x409C RS_IP_10
+0x40A0 RS_IP_11
+0x40A4 RS_IP_12
+0x40A8 RS_IP_13
+0x40AC RS_IP_14
+0x40B0 RS_IP_15
+0x4320 RS_INST_0
+0x4324 RS_INST_1
+0x4328 RS_INST_2
+0x432C RS_INST_3
+0x4330 RS_INST_4
+0x4334 RS_INST_5
+0x4338 RS_INST_6
+0x433C RS_INST_7
+0x4340 RS_INST_8
+0x4344 RS_INST_9
+0x4348 RS_INST_10
+0x434C RS_INST_11
+0x4350 RS_INST_12
+0x4354 RS_INST_13
+0x4358 RS_INST_14
+0x435C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4250 GA_US_VECTOR_INDEX
+0x4254 GA_US_VECTOR_DATA
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4620 US_FC_BOOL_CONST
+0x4624 US_FC_CTRL
+0x4630 US_CODE_ADDR
+0x4634 US_CODE_RANGE
+0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4EF8 RB3D_CONSTANT_COLOR_AR
+0x4EFC RB3D_CONSTANT_COLOR_GB
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4FD4 ZB_STENCILREFMASK_BF


Property changes on: trunk/sys/dev/drm2/radeon/reg_srcs/rv515
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rn50_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rn50_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rn50_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,32 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rn50_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned rn50_reg_safe_bm[102] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF,
+};


Property changes on: trunk/sys/dev/drm2/radeon/rn50_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
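
The rn50_reg_safe_bm table above is such a bitmap: one bit per 32-bit
register, a set bit meaning the register is rejected (or routed to a
dedicated per-register check), a clear bit meaning the command stream may
write it directly. With 102 words it covers registers up to offset
0x32FC. A hedged sketch of the lookup, modeled on the command-stream
checker convention; the function name is illustrative:

#include <stdbool.h>
#include <stdint.h>

static bool rn50_reg_is_safe(const unsigned *bm, unsigned nwords,
			     uint32_t reg)
{
	uint32_t i = (reg >> 2) / 32;          /* word index */
	uint32_t m = 1u << ((reg >> 2) & 31);  /* bit within the word */

	if (i >= nwords)
		return false;                  /* beyond the covered range */
	return (bm[i] & m) == 0;               /* clear bit = safe */
}
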
Added: trunk/sys/dev/drm2/radeon/rs100d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rs100d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs100d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,44 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs100d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* Registers */
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rs100d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
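
The S_/G_/C_ triplets in rs100d.h above follow one pattern used across
these register headers: S_* shifts a field value into position, G_* reads
it back out, and C_* is the AND-mask that clears the field. A short
worked example against NB_TOM, compiled against the macros above; the
register value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical framebuffer span: start 0x00100000, top 0x03FF0000. */
	uint32_t tom = S_00015C_MC_FB_START(0x0010) |
		       S_00015C_MC_FB_TOP(0x03ff);

	uint32_t fb_start = G_00015C_MC_FB_START(tom) << 16; /* 0x00100000 */
	uint32_t fb_top   = G_00015C_MC_FB_TOP(tom) << 16;   /* 0x03ff0000 */

	/* Replace the START field, leaving TOP untouched. */
	tom = (tom & C_00015C_MC_FB_START) | S_00015C_MC_FB_START(0x0020);

	printf("start=0x%08x top=0x%08x tom=0x%08x\n", fb_start, fb_top, tom);
	return 0;
}
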
Added: trunk/sys/dev/drm2/radeon/rs400.c
===================================================================
--- trunk/sys/dev/drm2/radeon/rs400.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs400.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,566 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs400.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rs400d.h"
+
+/* This file gathers functions specific to rs400 and rs480. */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+	/* Check gart size */
+	switch (rdev->mc.gtt_size/(1024*1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		DRM_ERROR("Unable to use IGP GART size %uM\n",
+			  (unsigned)(rdev->mc.gtt_size >> 20));
+		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+		DRM_ERROR("Forcing to 32M GART size\n");
+		rdev->mc.gtt_size = 32 * 1024 * 1024;
+		return;
+	}
+}
+
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	unsigned int timeout = rdev->usec_timeout;
+
+	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+	do {
+		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+			break;
+		DRM_UDELAY(1);
+		timeout--;
+	} while (timeout > 0);
+	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		DRM_ERROR("RS400 GART already initialized\n");
+		return 0;
+	}
+	/* Check gart size */
+	switch(rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	if (rs400_debugfs_pcie_gart_info_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t size_reg;
+	uint32_t tmp;
+
+	radeon_gart_restore(rdev);
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	/* Check gart size */
+	switch(rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+		size_reg = RS480_VA_SIZE_32MB;
+		break;
+	case 64:
+		size_reg = RS480_VA_SIZE_64MB;
+		break;
+	case 128:
+		size_reg = RS480_VA_SIZE_128MB;
+		break;
+	case 256:
+		size_reg = RS480_VA_SIZE_256MB;
+		break;
+	case 512:
+		size_reg = RS480_VA_SIZE_512MB;
+		break;
+	case 1024:
+		size_reg = RS480_VA_SIZE_1GB;
+		break;
+	case 2048:
+		size_reg = RS480_VA_SIZE_2GB;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* It should be fine to program it to the max value */
+	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+	} else {
+		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+		WREG32(RS480_AGP_BASE_2, 0);
+	}
+	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	} else {
+		WREG32(RADEON_MC_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	}
+	/* The table should be in the 32-bit address space, so ignore the bits above. */
+	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
+	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
+
+	WREG32_MC(RS480_GART_BASE, tmp);
+	/* TODO: more tweaking here */
+	WREG32_MC(RS480_GART_FEATURE_ID,
+		  (RS480_TLB_ENABLE |
+		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+	/* Disable snooping */
+	WREG32_MC(RS480_AGP_MODE_CNTL,
+		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+	/* Disable AGP mode */
+	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS480_MC_MISC_CNTL,
+			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+	} else {
+		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+	}
+	/* Enable gart */
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+	rs400_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+void rs400_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs400_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	uint32_t entry;
+	u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+
+	entry = (lower_32_bits(addr) & ~PAGE_MASK) |
+		((upper_32_bits(addr) & 0xff) << 4) |
+		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+	entry = cpu_to_le32(entry);
+	gtt[i] = entry;
+	return 0;
+}
+
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void rs400_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct? */
+	r420_pipes_init(rdev);
+	if (rs400_mc_wait_for_idle(rdev)) {
+		DRM_ERROR("rs400: Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
+	}
+}
+
+static void rs400_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
+	/* DDR for all cards after R300 & IGP */
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	r100_vram_init_sizes(rdev);
+	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+	r = RREG32(RS480_NB_MC_DATA);
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+	return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+	WREG32(RS480_NB_MC_DATA, (v));
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
+		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32(RS690_HDP_FB_LOCATION);
+		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+	} else {
+		tmp = RREG32(RADEON_AGP_BASE);
+		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32(RS480_AGP_BASE_2);
+		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32(RADEON_MC_AGP_LOCATION);
+		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	}
+	tmp = RREG32_MC(RS480_GART_BASE);
+	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x5F);
+	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3B);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3C);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x30);
+	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x31);
+	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x32);
+	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x33);
+	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x34);
+	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x35);
+	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x36);
+	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x37);
+	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static void rs400_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+	r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r100_set_common_regs(rdev);
+
+	rs400_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not working */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* setup MC before calling post tables */
+	rs400_mc_program(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disabling VGA needs to use a VGA request */
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs400_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	r300_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/rs400.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
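
rs400_gart_set_page above packs a 40-bit bus address into a 32-bit GART
entry: the page-aligned low 32 bits go in directly, address bits 39:32
land in entry bits 11:4, and bits 2/3 carry the write/read permission
flags. Note that in this FreeBSD-derived tree PAGE_MASK is PAGE_SIZE - 1,
so "& ~PAGE_MASK" keeps the page-frame bits. A worked example with a
hypothetical address, as a standalone sketch:

#include <stdint.h>

#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)

static uint32_t rs400_encode_pte(uint64_t addr)
{
	uint32_t entry;

	entry  = (uint32_t)addr & 0xfffff000;           /* page-aligned low 32 bits */
	entry |= ((uint32_t)(addr >> 32) & 0xff) << 4;  /* bits 39:32 -> 11:4 */
	entry |= RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
	return entry;     /* the driver stores this little-endian in the table */
}

/* rs400_encode_pte(0x123456000ULL) == 0x2345601c:
 * low bits 0x23456000, upper byte 0x01 shifted in as 0x10, flags 0xc. */
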
Added: trunk/sys/dev/drm2/radeon/rs400d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rs400d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs400d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,164 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs400d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rs400d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
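
The R_000E40_RBBM_STATUS getters in rs400d.h above decode the raw status
word that the "GPU reset failed!" warnings in rs400.c print. A hedged
sketch of such a decode, using the G_000E40_* macros above; the status
value would come from RREG32(R_000E40_RBBM_STATUS) and the helper name is
illustrative:

#include <stdint.h>
#include <stdio.h>

static void rbbm_status_decode(uint32_t status)
{
	printf("RBBM_STATUS 0x%08x: fifo=%u cp=%u vap=%u rb3d=%u gui_active=%u\n",
	       status,
	       G_000E40_CMDFIFO_AVAIL(status),   /* free command FIFO slots */
	       G_000E40_CP_CMDSTRM_BUSY(status),
	       G_000E40_VAP_BUSY(status),
	       G_000E40_RB3D_BUSY(status),
	       G_000E40_GUI_ACTIVE(status));
}
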
Added: trunk/sys/dev/drm2/radeon/rs600.c
===================================================================
--- trunk/sys/dev/drm2/radeon/rs600.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs600.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1082 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to the RS600, which is the IGP of
+ * the X1250/X1270 family supporting Intel CPUs (while the RS690/RS740 is
+ * the X1250/X1270 supporting AMD CPUs). The display engine is the avivo
+ * one, the BIOS is an atombios, and the 3D blocks are those of the R4XX
+ * family. The GART is different from the RS400 one and is very close to
+ * that of the R600 family (the R600 likely being an evolution of the
+ * RS600 GART block).
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs600.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs600d.h"
+
+#include "rs600_reg_safe.h"
+
+static void rs600_gpu_init(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+#endif
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	} else if (voltage->type == VOLTAGE_VDDC)
+		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
+
+	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+		}
+	} else {
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+	}
+	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+		} else
+			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		hdp_dyn_cntl &= ~HDP_FORCEON;
+	else
+		hdp_dyn_cntl |= HDP_FORCEON;
+	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+	/* mc_host_dyn seems to cause hangs from time to time */
+	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+	else
+		mc_host_dyn_cntl |= MC_HOST_FORCEON;
+	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+	else
+		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			break;
+		default:
+			break;
+		}
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			break;
+		default:
+			break;
+		}
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	pci_save_state(device_get_parent(rdev->dev));
+	/* disable bus mastering */
+	pci_disable_busmaster(rdev->dev);
+	mdelay(1);
+	/* reset GA+VAP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset MC */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(device_get_parent(rdev->dev));
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
+	rv515_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * GART.
+ */
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
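+	/* Toggle the invalidate bits (clear, set, clear) to flush the L1
+	 * TLBs and the L2 cache; the trailing read posts the writes.
+	 */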
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+}
+
+static int rs600_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		DRM_ERROR("RS600 GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r) {
+		return r;
+	}
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int rs600_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Enable bus master */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+	/* FIXME: setup default page */
+	WREG32_MC(R_000100_MC_PT0_CNTL,
+		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
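+	/* program the translation behavior of each MC client */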
+	for (i = 0; i < 19; i++) {
+		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
+	}
+	/* enable first context */
+	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+	/* setup the page table */
+	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
+	/* enable page tables */
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
+	rs600_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rs600_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* FIXME: disable out-of-GART access */
+	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rs600_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs600_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	uint64_t *ptr = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
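+	/* page-align the address and mark the entry valid, in system
+	 * memory, snooped, readable and writable */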
+	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+	ptr[i] = addr;
+	return 0;
+}
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	u32 hdmi0;
+	if (ASIC_IS_DCE2(rdev))
+		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	else
+		hdmi0 = 0;
+
+	if (!rdev->irq.installed) {
+		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= S_000040_SW_INT_EN(1);
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
+	if (rdev->irq.afmt[0]) {
+		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	}
+	WREG32(R_000040_GEN_INT_CNTL, tmp);
+	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+	if (ASIC_IS_DCE2(rdev))
+		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+	return 0;
+}
+
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+	uint32_t irq_mask = S_000044_SW_INT(1);
+	u32 tmp;
+
+	if (G_000044_DISPLAY_INT_STAT(irqs)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006534_D1MODE_VBLANK_STATUS,
+				S_006534_D1MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+				S_006D34_D2MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	} else {
+		rdev->irq.stat_regs.r500.disp_int = 0;
+	}
+
+	if (ASIC_IS_DCE2(rdev)) {
+		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else
+		rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
+	if (irqs) {
+		WREG32(R_000044_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	WREG32(R_006540_DxMODE_INT_MASK, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	rs600_irq_ack(rdev);
+}
+
+irqreturn_t rs600_irq_process(struct radeon_device *rdev)
+{
+	u32 status, msi_rearm;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	status = rs600_irq_ack(rdev);
+	if (!status &&
+	    !rdev->irq.stat_regs.r500.disp_int &&
+	    !rdev->irq.stat_regs.r500.hdmi0_status) {
+		return IRQ_NONE;
+	}
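+	/* keep processing and re-acking until no source remains asserted */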
+	while (status ||
+	       rdev->irq.stat_regs.r500.disp_int ||
+	       rdev->irq.stat_regs.r500.hdmi0_status) {
+		/* SW interrupt */
+		if (G_000044_SW_INT(status)) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				DRM_WAKEUP(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_flip(rdev, 0);
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				DRM_WAKEUP(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_flip(rdev, 1);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			queue_hdmi = true;
+			DRM_DEBUG("HDMI0\n");
+		}
+		status = rs600_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
+	if (queue_hdmi)
+		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
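+	/* rearm MSI: RS600/RS690/RS740 toggle the rearm bit in BUS_CNTL,
+	 * other chips use the dedicated MSI_REARM_EN register */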
+	if (rdev->msi_enabled) {
+		switch (rdev->family) {
+		case CHIP_RS600:
+		case CHIP_RS690:
+		case CHIP_RS740:
+			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+			WREG32(RADEON_BUS_CNTL, msi_rearm);
+			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
+	else
+		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs600_gpu_init(struct radeon_device *rdev)
+{
+	r420_pipes_init(rdev);
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+}
+
+static void rs600_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	base = RREG32_MC(R_000004_MC_FB_LOCATION);
+	base = G_000004_MC_FB_START(base) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs600_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+	/* FIXME: implement full support */
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
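+	/* highest display priority forces the priority A/B requests always on */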
+	if (rdev->disp_priority == 2) {
+		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	}
+}
+
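+/* MC registers are accessed indirectly: the register offset goes into
+ * MC_IND_INDEX (along with the arbiter and write-enable bits) and the
+ * value is transferred through MC_IND_DATA.
+ */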
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1));
+	return RREG32(R_000074_MC_IND_DATA);
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+	WREG32(R_000074_MC_IND_DATA, v);
+}
+
+static void rs600_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+}
+
+void rs600_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+
+	/* FIXME: What does AGP mean for such a chipset? */
+	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+	WREG32_MC(R_000006_AGP_BASE, 0);
+	WREG32_MC(R_000007_AGP_BASE_2, 0);
+	/* Program MC */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs600_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs600_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs600_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	rs600_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs600_gart_disable(rdev);
+	return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs600_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs600_mc_init(rdev);
+	rs600_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs600_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs600_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/rs600.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rs600_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rs600_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs600_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,61 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs600_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
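+/* Register-safety bitmap installed by rs600_set_safe_registers() and
+ * consulted by the command-stream checker; one bit per register.
+ */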
+static const unsigned rs600_reg_safe_bm[219] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0x00000000, 0x00000100, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0xFF800000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+};


Property changes on: trunk/sys/dev/drm2/radeon/rs600_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rs600d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rs600d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs600d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,689 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs600d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* Registers */
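+/* Naming convention: R_xxxxxx_NAME is a register offset, S_xxxxxx_FIELD(x)
+ * shifts a value into the field, G_xxxxxx_FIELD(x) extracts it, and
+ * C_xxxxxx_FIELD is the AND-mask that clears it.
+ */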
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
+#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
+#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
+#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_DISPLAY_INT_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_DISPLAY_INT_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_DISPLAY_INT_STAT                    0xFFFFFFFE
+#define   S_000044_VGA_INT_STAT(x)                     (((x) & 0x1) << 1)
+#define   G_000044_VGA_INT_STAT(x)                     (((x) >> 1) & 0x1)
+#define   C_000044_VGA_INT_STAT                        0xFFFFFFFD
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_MC_PROBE_FAULT_STAT(x)              (((x) & 0x1) << 16)
+#define   G_000044_MC_PROBE_FAULT_STAT(x)              (((x) >> 16) & 0x1)
+#define   C_000044_MC_PROBE_FAULT_STAT                 0xFFFEFFFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_SCRATCH_INT_STAT(x)                 (((x) & 0x1) << 18)
+#define   G_000044_SCRATCH_INT_STAT(x)                 (((x) >> 18) & 0x1)
+#define   C_000044_SCRATCH_INT_STAT                    0xFFFBFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) & 0x1) << 20)
+#define   G_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) >> 20) & 0x1)
+#define   C_000044_ATI_OVERDRIVE_INT_STAT              0xFFEFFFFF
+#define   S_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) & 0x1) << 21)
+#define   G_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) >> 21) & 0x1)
+#define   C_000044_MC_PROTECTION_FAULT_STAT            0xFFDFFFFF
+#define   S_000044_RBBM_READ_INT_STAT(x)               (((x) & 0x1) << 22)
+#define   G_000044_RBBM_READ_INT_STAT(x)               (((x) >> 22) & 0x1)
+#define   C_000044_RBBM_READ_INT_STAT                  0xFFBFFFFF
+#define   S_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) & 0x1) << 23)
+#define   G_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) >> 23) & 0x1)
+#define   C_000044_CB_CONTEXT_SWITCH_STAT              0xFF7FFFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_IDCT_INT_STAT(x)                    (((x) & 0x1) << 27)
+#define   G_000044_IDCT_INT_STAT(x)                    (((x) >> 27) & 0x1)
+#define   C_000044_IDCT_INT_STAT                       0xF7FFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define R_00004C_BUS_CNTL                            0x00004C
+#define   S_00004C_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 14)
+#define   G_00004C_BUS_MASTER_DIS(x)                   (((x) >> 14) & 0x1)
+#define   C_00004C_BUS_MASTER_DIS                      0xFFFFBFFF
+#define   S_00004C_BUS_MSI_REARM(x)                    (((x) & 0x1) << 20)
+#define   G_00004C_BUS_MSI_REARM(x)                    (((x) >> 20) & 0x1)
+#define   C_00004C_BUS_MSI_REARM                       0xFFEFFFFF
+#define R_000070_MC_IND_INDEX                        0x000070
+#define   S_000070_MC_IND_ADDR(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000070_MC_IND_ADDR(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000070_MC_IND_ADDR                         0xFFFF0000
+#define   S_000070_MC_IND_SEQ_RBS_0(x)                 (((x) & 0x1) << 16)
+#define   G_000070_MC_IND_SEQ_RBS_0(x)                 (((x) >> 16) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_0                    0xFFFEFFFF
+#define   S_000070_MC_IND_SEQ_RBS_1(x)                 (((x) & 0x1) << 17)
+#define   G_000070_MC_IND_SEQ_RBS_1(x)                 (((x) >> 17) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_1                    0xFFFDFFFF
+#define   S_000070_MC_IND_SEQ_RBS_2(x)                 (((x) & 0x1) << 18)
+#define   G_000070_MC_IND_SEQ_RBS_2(x)                 (((x) >> 18) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_2                    0xFFFBFFFF
+#define   S_000070_MC_IND_SEQ_RBS_3(x)                 (((x) & 0x1) << 19)
+#define   G_000070_MC_IND_SEQ_RBS_3(x)                 (((x) >> 19) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_3                    0xFFF7FFFF
+#define   S_000070_MC_IND_AIC_RBS(x)                   (((x) & 0x1) << 20)
+#define   G_000070_MC_IND_AIC_RBS(x)                   (((x) >> 20) & 0x1)
+#define   C_000070_MC_IND_AIC_RBS                      0xFFEFFFFF
+#define   S_000070_MC_IND_CITF_ARB0(x)                 (((x) & 0x1) << 21)
+#define   G_000070_MC_IND_CITF_ARB0(x)                 (((x) >> 21) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB0                    0xFFDFFFFF
+#define   S_000070_MC_IND_CITF_ARB1(x)                 (((x) & 0x1) << 22)
+#define   G_000070_MC_IND_CITF_ARB1(x)                 (((x) >> 22) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB1                    0xFFBFFFFF
+#define   S_000070_MC_IND_WR_EN(x)                     (((x) & 0x1) << 23)
+#define   G_000070_MC_IND_WR_EN(x)                     (((x) >> 23) & 0x1)
+#define   C_000070_MC_IND_WR_EN                        0xFF7FFFFF
+#define   S_000070_MC_IND_RD_INV(x)                    (((x) & 0x1) << 24)
+#define   G_000070_MC_IND_RD_INV(x)                    (((x) >> 24) & 0x1)
+#define   C_000070_MC_IND_RD_INV                       0xFEFFFFFF
+#define R_000074_MC_IND_DATA                         0x000074
+#define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000074_MC_IND_DATA                         0x00000000
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT           0x0060A4
+#define   S_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0060A4_D1CRTC_FRAME_COUNT                  0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS                0x006534
+#define   S_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006534_D1MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006534_D1MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006534_D1MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006534_D1MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006534_D1MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006534_D1MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006534_D1MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006534_D1MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK                     0x006540
+#define   S_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 0)
+#define   G_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) >> 0) & 0x1)
+#define   C_006540_D1MODE_VBLANK_INT_MASK              0xFFFFFFFE
+#define   S_006540_D1MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 4)
+#define   G_006540_D1MODE_VLINE_INT_MASK(x)            (((x) >> 4) & 0x1)
+#define   C_006540_D1MODE_VLINE_INT_MASK               0xFFFFFFEF
+#define   S_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 8)
+#define   G_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) >> 8) & 0x1)
+#define   C_006540_D2MODE_VBLANK_INT_MASK              0xFFFFFEFF
+#define   S_006540_D2MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 12)
+#define   G_006540_D2MODE_VLINE_INT_MASK(x)            (((x) >> 12) & 0x1)
+#define   C_006540_D2MODE_VLINE_INT_MASK               0xFFFFEFFF
+#define   S_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 30)
+#define   G_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) >> 30) & 0x1)
+#define   C_006540_D1MODE_VBLANK_CP_SEL                0xBFFFFFFF
+#define   S_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 31)
+#define   G_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) >> 31) & 0x1)
+#define   C_006540_D2MODE_VBLANK_CP_SEL                0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT           0x0068A4
+#define   S_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0068A4_D2CRTC_FRAME_COUNT                  0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS                0x006D34
+#define   S_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006D34_D2MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006D34_D2MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006D34_D2MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006D34_D2MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS               0x007EDC
+#define   S_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 4)
+#define   G_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) >> 4) & 0x1)
+#define   C_007EDC_LB_D1_VBLANK_INTERRUPT              0xFFFFFFEF
+#define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
+#define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
+#define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
+#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
+#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
+#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
+#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
+#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
+#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
+#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
+#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
+#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
+#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007838_DACA_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
+#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
+#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007A38_DACB_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
+#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
+#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
+#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
+#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
+#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
+#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
+#define R_007404_HDMI0_STATUS                          0x007404
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) & 0x1) << 28)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) >> 28) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG               0xEFFFFFFF
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) & 0x1) << 29)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) >> 29) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG_INT           0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL            0x007408
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) & 0x1) << 28)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) >> 28) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK          0xEFFFFFFF
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) & 0x1) << 29)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) >> 29) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK           0xDFFFFFFF
+
+/* MC registers */
+#define R_000000_MC_STATUS                           0x000000
+#define   S_000000_MC_IDLE(x)                          (((x) & 0x1) << 0)
+#define   G_000000_MC_IDLE(x)                          (((x) >> 0) & 0x1)
+#define   C_000000_MC_IDLE                             0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000009_MC_CNTL1                            0x000009
+#define   S_000009_ENABLE_PAGE_TABLES(x)               (((x) & 0x1) << 26)
+#define   G_000009_ENABLE_PAGE_TABLES(x)               (((x) >> 26) & 0x1)
+#define   C_000009_ENABLE_PAGE_TABLES                  0xFBFFFFFF
+/* FIXME: the exact field sizes are unknown; need feedback from AMD */
+#define R_000100_MC_PT0_CNTL                         0x000100
+#define   S_000100_ENABLE_PT(x)                        (((x) & 0x1) << 0)
+#define   G_000100_ENABLE_PT(x)                        (((x) >> 0) & 0x1)
+#define   C_000100_ENABLE_PT                           0xFFFFFFFE
+#define   S_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_000100_EFFECTIVE_L2_CACHE_SIZE             0xFFFC7FFF
+#define   S_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) & 0x7) << 21)
+#define   G_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) >> 21) & 0x7)
+#define   C_000100_EFFECTIVE_L2_QUEUE_SIZE             0xFF1FFFFF
+#define   S_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) & 0x1) << 28)
+#define   G_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) >> 28) & 0x1)
+#define   C_000100_INVALIDATE_ALL_L1_TLBS              0xEFFFFFFF
+#define   S_000100_INVALIDATE_L2_CACHE(x)              (((x) & 0x1) << 29)
+#define   G_000100_INVALIDATE_L2_CACHE(x)              (((x) >> 29) & 0x1)
+#define   C_000100_INVALIDATE_L2_CACHE                 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL                0x000102
+#define   S_000102_ENABLE_PAGE_TABLE(x)                (((x) & 0x1) << 0)
+#define   G_000102_ENABLE_PAGE_TABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000102_ENABLE_PAGE_TABLE                   0xFFFFFFFE
+#define   S_000102_PAGE_TABLE_DEPTH(x)                 (((x) & 0x3) << 1)
+#define   G_000102_PAGE_TABLE_DEPTH(x)                 (((x) >> 1) & 0x3)
+#define   C_000102_PAGE_TABLE_DEPTH                    0xFFFFFFF9
+#define   V_000102_PAGE_TABLE_FLAT                     0
+/* R600 documentation suggests that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR     0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR    0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR   0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR      0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR     0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR       0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL                 0x00016C
+#define   S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define   G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define   C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE    0xFFFFFFFE
+#define   S_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) & 0x1) << 1)
+#define   G_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) >> 1) & 0x1)
+#define   C_00016C_TRANSLATION_MODE_OVERRIDE           0xFFFFFFFD
+#define   S_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) & 0x3) << 8)
+#define   G_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) >> 8) & 0x3)
+#define   C_00016C_SYSTEM_ACCESS_MODE_MASK             0xFFFFFCFF
+#define   V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY          0
+#define   V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP      1
+#define   V_00016C_SYSTEM_ACCESS_MODE_IN_SYS           2
+#define   V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS       3
+#define   S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) & 0x1) << 10)
+#define   G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) >> 10) & 0x1)
+#define   C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS     0xFFFFFBFF
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH  0
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define   S_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) & 0x7) << 11)
+#define   G_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) >> 11) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_CACHE_SIZE             0xFFFFC7FF
+#define   S_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) & 0x1) << 14)
+#define   G_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) >> 14) & 0x1)
+#define   C_00016C_ENABLE_FRAGMENT_PROCESSING          0xFFFFBFFF
+#define   S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_QUEUE_SIZE             0xFFFC7FFF
+#define   S_00016C_INVALIDATE_L1_TLB(x)                (((x) & 0x1) << 20)
+#define   G_00016C_INVALIDATE_L1_TLB(x)                (((x) >> 20) & 0x1)
+#define   C_00016C_INVALIDATE_L1_TLB                   0xFFEFFFFF
+
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+
+/* PLL regs */
+#define GENERAL_PWRMGT                                 0x8
+#define   GLOBAL_PWRMGT_EN                             (1 << 0)
+#define   MOBILE_SU                                    (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH                         0xc
+#define   NORMAL_POWER_SCLK_HILEN(x)                   ((x) << 0)
+#define   NORMAL_POWER_SCLK_LOLEN(x)                   ((x) << 4)
+#define   REDUCED_POWER_SCLK_HILEN(x)                  ((x) << 8)
+#define   REDUCED_POWER_SCLK_LOLEN(x)                  ((x) << 12)
+#define   POWER_D1_SCLK_HILEN(x)                       ((x) << 16)
+#define   POWER_D1_SCLK_LOLEN(x)                       ((x) << 20)
+#define   STATIC_SCREEN_HILEN(x)                       ((x) << 24)
+#define   STATIC_SCREEN_LOLEN(x)                       ((x) << 28)
+#define DYN_SCLK_VOL_CNTL                              0xe
+#define   IO_CG_VOLTAGE_DROP                           (1 << 0)
+#define   VOLTAGE_DROP_SYNC                            (1 << 2)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 3)
+#define HDP_DYN_CNTL                                   0x10
+#define   HDP_FORCEON                                  (1 << 0)
+#define MC_HOST_DYN_CNTL                               0x1e
+#define   MC_HOST_FORCEON                              (1 << 0)
+#define DYN_BACKBIAS_CNTL                              0x29
+#define   IO_CG_BACKBIAS_EN                            (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL                     0x7ee0
+#define   PWRDN_WAIT_BUSY_OFF                          (1 << 0)
+#define   PWRDN_WAIT_PWRSEQ_OFF                        (1 << 4)
+#define   PWRDN_WAIT_PPLL_OFF                          (1 << 8)
+#define   PWRUP_WAIT_PPLL_ON                           (1 << 12)
+#define   PWRUP_WAIT_MEM_INIT_DONE                     (1 << 16)
+#define   PM_ASSERT_RESET                              (1 << 20)
+#define   PM_PWRDN_PPLL                                (1 << 24)
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rs600d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
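Note on the register headers in this commit: each field follows a uniform three-macro convention, where S_<reg>_<name>(x) shifts a value into the field's bit position, G_<reg>_<name>(x) extracts it from a register word, and C_<reg>_<name> is the inverse mask used to clear the field. A minimal read-modify-write sketch, assuming the driver's RREG32()/WREG32() MMIO accessors and using the DxMODE_INT_MASK field defined above:

    uint32_t tmp;

    tmp = RREG32(R_006540_DxMODE_INT_MASK);     /* read the current value */
    tmp &= C_006540_D1MODE_VBLANK_INT_MASK;     /* clear the field */
    tmp |= S_006540_D1MODE_VBLANK_INT_MASK(1);  /* enable the D1 vblank interrupt */
    WREG32(R_006540_DxMODE_INT_MASK, tmp);      /* write it back */

Extracting the bit from a value read back is the G_ form: G_006540_D1MODE_VBLANK_INT_MASK(tmp).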
Added: trunk/sys/dev/drm2/radeon/rs690.c
===================================================================
--- trunk/sys/dev/drm2/radeon/rs690.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs690.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,780 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs690.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs690d.h"
+
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+		if (G_000090_MC_SYSTEM_IDLE(tmp))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs690_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct? */
+	r420_pipes_init(rdev);
+	if (rs690_mc_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait for MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
+void rs690_pm_info(struct radeon_device *rdev)
+{
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *info;
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	fixed20_12 tmp;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		info = (union igp_info *)((uintptr_t)rdev->mode_info.atom_context->bios + data_offset);
+
+		/* Get various system information from the BIOS */
+		switch (crev) {
+		case 1:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le16_to_cpu(info->info.usK8MemoryClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			else if (rdev->clock.default_mclk) {
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			} else
+				rdev->pm.igp_system_mclk.full = dfixed_const(400);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
+			break;
+		case 2:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
+			else if (rdev->clock.default_mclk)
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+			else
+				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
+			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
+			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+			break;
+		default:
+			/* We assume the slowest possible clock, i.e. the worst case */
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+			rdev->pm.igp_system_mclk.full = dfixed_const(200);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+			break;
+		}
+	} else {
+		/* We assume the slowest possible clock, i.e. the worst case */
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+		rdev->pm.igp_system_mclk.full = dfixed_const(200);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+	}
+	/* Compute various bandwidths */
+	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
+	tmp.full = dfixed_const(4);
+	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
+	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
+	 *              = ht_clk * ht_width / 5
+	 */
+	tmp.full = dfixed_const(5);
+	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
+						rdev->pm.igp_ht_link_width);
+	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
+	if (tmp.full < rdev->pm.max_bandwidth.full) {
+		/* HT link is a limiting factor */
+		rdev->pm.max_bandwidth.full = tmp.full;
+	}
+	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
+	 *                    = (sideport_clk * 14) / 10
+	 */
+	tmp.full = dfixed_const(14);
+	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+	tmp.full = dfixed_const(10);
+	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
+}
+
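+/*
+ * Note on the arithmetic above: fixed20_12 is a 20.12 fixed-point type, and
+ * dfixed_const()/dfixed_mul()/dfixed_div() build constants and multiply or
+ * divide in that representation. As a worked example with a hypothetical
+ * igp_sideport_mclk of 400 (MHz):
+ *   sideport_bandwidth = 400 * 14 / 10 = 560
+ * which matches (sideport_clk / 2) * 2 * 2 * 0.7 = sideport_clk * 1.4.
+ */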
+static void rs690_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+	base = G_000100_MC_FB_START(base) << 16;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	rs690_pm_info(rdev);
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+			      struct drm_display_mode *mode1,
+			      struct drm_display_mode *mode2)
+{
+	u32 tmp;
+
+	/*
+	 * Line Buffer Setup
+	 * There is a single line buffer shared by both display controllers.
+	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning can either be done
+	 * manually or via one of four preset allocations specified in bits 1:0:
+	 *  0 - line buffer is divided in half and shared between crtc
+	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+	 *  2 - D1 gets the whole buffer
+	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
+	 * allocation mode. In manual allocation mode, D1 always starts at 0,
+	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
+	 */
+	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
+	/* auto */
+	if (mode1 && mode2) {
+		if (mode1->hdisplay > mode2->hdisplay) {
+			if (mode1->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else if (mode2->hdisplay > mode1->hdisplay) {
+			if (mode2->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else
+			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+	} else if (mode1) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
+	} else if (mode2) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+	}
+	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+}
+
+struct rs690_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
+
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+				  struct radeon_crtc *crtc,
+				  struct rs690_watermark *wm)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period (ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
+
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period (ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Maximum bandwidth is the minimum bandwidth of all components */
+	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
+	if (rdev->mc.igp_sideport_enabled) {
+		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+			rdev->pm.sideport_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+		read_delay_latency.full = dfixed_const(370 * 800);
+		a.full = dfixed_const(1000);
+		b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
+		read_delay_latency.full = dfixed_div(read_delay_latency, b);
+		read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+	} else {
+		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+			rdev->pm.k8_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
+		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
+			rdev->pm.ht_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
+		read_delay_latency.full = dfixed_const(5000);
+	}
+
+	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
+	a.full = dfixed_const(16);
+	rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+	a.full = dfixed_const(1000);
+	rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock(ns)
+	 */
+	a.full = dfixed_const(256 * 13);
+	chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+	a.full = dfixed_const(10);
+	chunk_time.full = dfixed_div(chunk_time, a);
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		a.full = dfixed_const(2);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
+	/* We assume worst case 32bits (4 bytes) */
+	wm->dbpp.full = dfixed_const(4 * 8);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = dfixed_const(10);
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
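+/*
+ * Worked example for the timing math above, using a hypothetical
+ * 1920x1080 mode with mode->clock = 148500 (kHz) and crtc_htotal = 2200:
+ *   pclk      = 1000 / (148500 / 1000) ~ 6.73 ns per pixel
+ *   line_time = 2200 * 6.73            ~ 14815 ns per scanline
+ * active_time then scales line_time by hdisplay/htotal (1920/2200).
+ */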
+void rs690_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rs690_watermark wm0;
+	struct rs690_watermark wm1;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+	u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for displaypriority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+		tmp &= C_000104_MC_DISP0R_INIT_LAT;
+		tmp &= C_000104_MC_DISP1R_INIT_LAT;
+		if (mode0)
+			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+		if (mode1)
+			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+	}
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+		WREG32(R_006C9C_DCP_CONTROL, 0);
+	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+		WREG32(R_006C9C_DCP_CONTROL, 2);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
+	tmp = (wm0.lb_request_fifo_depth - 1);
+	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
+	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		else
+			b.full = wm1.num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+	} else if (mode1) {
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		else
+			a.full = wm1.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1.sclk, a);
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+	}
+
+	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+	r = RREG32(R_00007C_MC_DATA);
+	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+	return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+		S_000078_MC_IND_WR_EN(1));
+	WREG32(R_00007C_MC_DATA, v);
+	WREG32(R_000078_MC_INDEX, 0x7F);
+}
+
+static void rs690_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs690_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+	/* Program the MC; the address space should be limited to 32 bits */
+	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
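+/*
+ * Example of the FB location packing above, assuming a hypothetical 256MB
+ * aperture: with vram_start = 0x00000000 and vram_end = 0x0FFFFFFF, the
+ * ">> 16" shifts yield FB_START = 0x0000 and FB_TOP = 0x0FFF, so the word
+ * written to R_000100_MCCFG_FB_LOCATION is 0x0FFF0000.
+ */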
+static int rs690_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs690_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs690_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disabling VGA needs to use a VGA request */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs690_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/radeon/rs690.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
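On RS690 the memory-controller registers sit behind an index/data pair rather than in the directly mapped MMIO space: rs690_mc_wreg() above writes R_000078_MC_INDEX with the target address plus MC_IND_WR_EN, then pushes the value through R_00007C_MC_DATA, and rs690_mc_rreg() is the read-side equivalent. A short caller sketch, reusing names from this commit (rs690_mc_wait_for_idle() earlier in the file reaches the same registers via the RREG32_MC() wrapper):

    uint32_t status;

    /* read MC_SYSTEM_STATUS through the index/data pair */
    status = rs690_mc_rreg(rdev, R_000090_MC_SYSTEM_STATUS);
    if (G_000090_MC_SYSTEM_IDLE(status))
        return 0;   /* memory controller is idle */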
Added: trunk/sys/dev/drm2/radeon/rs690d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rs690d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rs690d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,314 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rs690d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* Registers */
+#define R_000078_MC_INDEX                            0x000078
+#define   S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
+#define   G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
+#define   C_000078_MC_IND_ADDR                         0xFFFFFE00
+#define   S_000078_MC_IND_WR_EN(x)                     (((x) & 0x1) << 9)
+#define   G_000078_MC_IND_WR_EN(x)                     (((x) >> 9) & 0x1)
+#define   C_000078_MC_IND_WR_EN                        0xFFFFFDFF
+#define R_00007C_MC_DATA                             0x00007C
+#define   S_00007C_MC_DATA(x)                          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00007C_MC_DATA(x)                          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00007C_MC_DATA                             0x00000000
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT                  0x006520
+#define   S_006520_DC_LB_MEMORY_SPLIT(x)               (((x) & 0x3) << 0)
+#define   G_006520_DC_LB_MEMORY_SPLIT(x)               (((x) >> 0) & 0x3)
+#define   C_006520_DC_LB_MEMORY_SPLIT                  0xFFFFFFFC
+#define   S_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) & 0x1) << 2)
+#define   G_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) >> 2) & 0x1)
+#define   C_006520_DC_LB_MEMORY_SPLIT_MODE             0xFFFFFFFB
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF    0
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q      1
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY          2
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q      3
+#define   S_006520_DC_LB_DISP1_END_ADR(x)              (((x) & 0x7FF) << 4)
+#define   G_006520_DC_LB_DISP1_END_ADR(x)              (((x) >> 4) & 0x7FF)
+#define   C_006520_DC_LB_DISP1_END_ADR                 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL                         0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING              0x006D58
+#define   S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 0)
+#define   G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) >> 0) & 0xF)
+#define   C_006D58_LB_D1_MAX_REQ_OUTSTANDING           0xFFFFFFF0
+#define   S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 16)
+#define   G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) >> 16) & 0xF)
+#define   C_006D58_LB_D2_MAX_REQ_OUTSTANDING           0xFFF0FFFF
+
+
+#define R_000090_MC_SYSTEM_STATUS                    0x000090
+#define   S_000090_MC_SYSTEM_IDLE(x)                   (((x) & 0x1) << 0)
+#define   G_000090_MC_SYSTEM_IDLE(x)                   (((x) >> 0) & 0x1)
+#define   C_000090_MC_SYSTEM_IDLE                      0xFFFFFFFE
+#define   S_000090_MC_SEQUENCER_IDLE(x)                (((x) & 0x1) << 1)
+#define   G_000090_MC_SEQUENCER_IDLE(x)                (((x) >> 1) & 0x1)
+#define   C_000090_MC_SEQUENCER_IDLE                   0xFFFFFFFD
+#define   S_000090_MC_ARBITER_IDLE(x)                  (((x) & 0x1) << 2)
+#define   G_000090_MC_ARBITER_IDLE(x)                  (((x) >> 2) & 0x1)
+#define   C_000090_MC_ARBITER_IDLE                     0xFFFFFFFB
+#define   S_000090_MC_SELECT_PM(x)                     (((x) & 0x1) << 3)
+#define   G_000090_MC_SELECT_PM(x)                     (((x) >> 3) & 0x1)
+#define   C_000090_MC_SELECT_PM                        0xFFFFFFF7
+#define   S_000090_RESERVED4(x)                        (((x) & 0xF) << 4)
+#define   G_000090_RESERVED4(x)                        (((x) >> 4) & 0xF)
+#define   C_000090_RESERVED4                           0xFFFFFF0F
+#define   S_000090_RESERVED8(x)                        (((x) & 0xF) << 8)
+#define   G_000090_RESERVED8(x)                        (((x) >> 8) & 0xF)
+#define   C_000090_RESERVED8                           0xFFFFF0FF
+#define   S_000090_RESERVED12(x)                       (((x) & 0xF) << 12)
+#define   G_000090_RESERVED12(x)                       (((x) >> 12) & 0xF)
+#define   C_000090_RESERVED12                          0xFFFF0FFF
+#define   S_000090_MCA_INIT_EXECUTED(x)                (((x) & 0x1) << 16)
+#define   G_000090_MCA_INIT_EXECUTED(x)                (((x) >> 16) & 0x1)
+#define   C_000090_MCA_INIT_EXECUTED                   0xFFFEFFFF
+#define   S_000090_MCA_IDLE(x)                         (((x) & 0x1) << 17)
+#define   G_000090_MCA_IDLE(x)                         (((x) >> 17) & 0x1)
+#define   C_000090_MCA_IDLE                            0xFFFDFFFF
+#define   S_000090_MCA_SEQ_IDLE(x)                     (((x) & 0x1) << 18)
+#define   G_000090_MCA_SEQ_IDLE(x)                     (((x) >> 18) & 0x1)
+#define   C_000090_MCA_SEQ_IDLE                        0xFFFBFFFF
+#define   S_000090_MCA_ARB_IDLE(x)                     (((x) & 0x1) << 19)
+#define   G_000090_MCA_ARB_IDLE(x)                     (((x) >> 19) & 0x1)
+#define   C_000090_MCA_ARB_IDLE                        0xFFF7FFFF
+#define   S_000090_RESERVED20(x)                       (((x) & 0xFFF) << 20)
+#define   G_000090_RESERVED20(x)                       (((x) >> 20) & 0xFFF)
+#define   C_000090_RESERVED20                          0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION                   0x000100
+#define   S_000100_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000100_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000100_MC_FB_START                         0xFFFF0000
+#define   S_000100_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000100_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000100_MC_FB_TOP                           0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER              0x000104
+#define   S_000104_MC_CPR_INIT_LAT(x)                  (((x) & 0xF) << 0)
+#define   G_000104_MC_CPR_INIT_LAT(x)                  (((x) >> 0) & 0xF)
+#define   C_000104_MC_CPR_INIT_LAT                     0xFFFFFFF0
+#define   S_000104_MC_VF_INIT_LAT(x)                   (((x) & 0xF) << 4)
+#define   G_000104_MC_VF_INIT_LAT(x)                   (((x) >> 4) & 0xF)
+#define   C_000104_MC_VF_INIT_LAT                      0xFFFFFF0F
+#define   S_000104_MC_DISP0R_INIT_LAT(x)               (((x) & 0xF) << 8)
+#define   G_000104_MC_DISP0R_INIT_LAT(x)               (((x) >> 8) & 0xF)
+#define   C_000104_MC_DISP0R_INIT_LAT                  0xFFFFF0FF
+#define   S_000104_MC_DISP1R_INIT_LAT(x)               (((x) & 0xF) << 12)
+#define   G_000104_MC_DISP1R_INIT_LAT(x)               (((x) >> 12) & 0xF)
+#define   C_000104_MC_DISP1R_INIT_LAT                  0xFFFF0FFF
+#define   S_000104_MC_FIXED_INIT_LAT(x)                (((x) & 0xF) << 16)
+#define   G_000104_MC_FIXED_INIT_LAT(x)                (((x) >> 16) & 0xF)
+#define   C_000104_MC_FIXED_INIT_LAT                   0xFFF0FFFF
+#define   S_000104_MC_E2R_INIT_LAT(x)                  (((x) & 0xF) << 20)
+#define   G_000104_MC_E2R_INIT_LAT(x)                  (((x) >> 20) & 0xF)
+#define   C_000104_MC_E2R_INIT_LAT                     0xFF0FFFFF
+#define   S_000104_SAME_PAGE_PRIO(x)                   (((x) & 0xF) << 24)
+#define   G_000104_SAME_PAGE_PRIO(x)                   (((x) >> 24) & 0xF)
+#define   C_000104_SAME_PAGE_PRIO                      0xF0FFFFFF
+#define   S_000104_MC_GLOBW_INIT_LAT(x)                (((x) & 0xF) << 28)
+#define   G_000104_MC_GLOBW_INIT_LAT(x)                (((x) >> 28) & 0xF)
+#define   C_000104_MC_GLOBW_INIT_LAT                   0x0FFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rs690d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
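
The register headers in this commit all follow the radeon S_/G_/C_ macro convention: S_<reg>_<field>(x) shifts a value into field position, G_<reg>_<field>(x) extracts the field from a full register word, and C_<reg>_<field> is the complement mask used to clear the field before writing. A minimal read-modify-write sketch, assuming the RREG32()/WREG32() accessors used in the driver sources later in this commit (the helper name is illustrative, not part of the commit):

    /* Illustrative helper: update HDP_FB_START (the framebuffer base in
     * 64 KiB units, i.e. vram_start >> 16) without disturbing the rest
     * of the register. */
    static void hdp_set_fb_start(struct radeon_device *rdev, u32 fb_start)
    {
            u32 tmp;

            tmp = RREG32(R_000134_HDP_FB_LOCATION);
            tmp &= C_000134_HDP_FB_START;            /* clear the old field */
            tmp |= S_000134_HDP_FB_START(fb_start);  /* pack the new value */
            WREG32(R_000134_HDP_FB_LOCATION, tmp);
    }
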
Added: trunk/sys/dev/drm2/radeon/rv200d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv200d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv200d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,40 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv200d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rv200d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
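
rv200d.h adds only the AGP_BASE_2 register, which carries the address bits of the AGP aperture base above the low 32. A sketch of how such a split base is programmed, modeled on the MC_AGP_BASE/MC_AGP_BASE_2 pair written by rv515_mc_program() later in this commit (RADEON_AGP_BASE is assumed from radeon_reg.h; the snippet is illustrative only):

    /* Illustrative: split a wider-than-32-bit AGP base across the pair. */
    WREG32(RADEON_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
    WREG32(R_00015C_AGP_BASE_2,
           S_00015C_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
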
Added: trunk/sys/dev/drm2/radeon/rv250d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv250d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv250d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,127 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv250d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+#define R_00000D_SCLK_CNTL_M6                        0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rv250d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
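
The SCLK_CNTL_M6 bits above drive dynamic clock gating: the *_MAX_DYN_STOP_LAT fields bound how long each block's clock may stay stopped, and the FORCE_* bits pin a block's clock on. A sketch of forcing the CP clock on, using the RREG32_PLL()/WREG32_PLL() accessors that rv515_clock_startup() uses later in this commit (illustrative only):

    u32 tmp;

    tmp = RREG32_PLL(R_00000D_SCLK_CNTL_M6);
    tmp &= C_00000D_FORCE_CP;     /* clear the force bit first */
    tmp |= S_00000D_FORCE_CP(1);  /* keep the CP clock running */
    WREG32_PLL(R_00000D_SCLK_CNTL_M6, tmp);
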
Added: trunk/sys/dev/drm2/radeon/rv350d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv350d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv350d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,56 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv350d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL                           0x00000D */
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rv350d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
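
The G_ getters pair naturally with the status registers for idle polling. A sketch modeled on rv515_mc_wait_for_idle() in the rv515.c file that follows, here spinning on the RBBM GUI_ACTIVE bit defined in rs690d.h above (the helper itself is illustrative; rdev->usec_timeout and DRM_UDELAY() are the driver facilities used below):

    static int gui_wait_for_idle(struct radeon_device *rdev)
    {
            unsigned i;

            for (i = 0; i < rdev->usec_timeout; i++) {
                    /* done once the GUI engine reports idle */
                    if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
                            return 0;
                    DRM_UDELAY(1);
            }
            return -1;
    }
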
Added: trunk/sys/dev/drm2/radeon/rv515.c
===================================================================
--- trunk/sys/dev/drm2/radeon/rv515.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv515.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1251 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv515.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "rv515d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rv515_reg_safe.h"
+
+/* This file gathers functions specific to: rv515 */
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+#endif
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+void rv515_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+	}
+	if (rv515_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes!\n");
+	}
+	if (rv515_debugfs_ga_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for GA!\n");
+	}
+}
+
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  ISYNC_ANY2D_IDLE3D |
+			  ISYNC_ANY3D_IDLE2D |
+			  ISYNC_WAIT_IDLEGUI |
+			  ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X0_SHIFT) |
+			   (6 << MS_Y0_SHIFT) |
+			   (6 << MS_X1_SHIFT) |
+			   (6 << MS_Y1_SHIFT) |
+			   (6 << MS_X2_SHIFT) |
+			   (6 << MS_Y2_SHIFT) |
+			   (6 << MSBD0_Y_SHIFT) |
+			   (6 << MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X3_SHIFT) |
+			   (6 << MS_Y3_SHIFT) |
+			   (6 << MS_X4_SHIFT) |
+			   (6 << MS_Y4_SHIFT) |
+			   (6 << MS_X5_SHIFT) |
+			   (6 << MS_Y5_SHIFT) |
+			   (6 << MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+	radeon_ring_write(ring, PACKET0(0x20C8, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(MC_STATUS);
+		if (tmp & MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+void rv515_vga_render_disable(struct radeon_device *rdev)
+{
+	WREG32(R_000300_VGA_RENDER_CONTROL,
+		RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void rv515_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "resetting GPU. Bad things might happen.\n");
+	}
+	rv515_vga_render_disable(rdev);
+	r420_pipes_init(rdev);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait GUI idle while "
+		       "resetting GPU. Bad things might happen.\n");
+	}
+	if (rv515_mc_wait_for_idle(rdev)) {
+		DRM_ERROR("Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0:
+		rdev->mc.vram_width = 64;
+		break;
+	case 1:
+		rdev->mc.vram_width = 128;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
+}
+
+static void rv515_mc_init(struct radeon_device *rdev)
+{
+
+	rv515_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+	r = RREG32(MC_IND_DATA);
+	WREG32(MC_IND_INDEX, 0);
+	return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+	WREG32(MC_IND_DATA, (v));
+	WREG32(MC_IND_INDEX, 0);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(SU_REG_DEST);
+	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+	tmp = RREG32(GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(0x2140);
+	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+	radeon_asic_reset(rdev);
+	tmp = RREG32(0x425C);
+	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
+	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+				radeon_wait_for_vblank(rdev, i);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_EN;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+			/* ***** */
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	if (rdev->family >= CHIP_R600) {
+		if (rdev->family >= CHIP_RV770)
+			blackout = RREG32(R700_MC_CITF_CNTL);
+		else
+			blackout = RREG32(R600_CITF_CNTL);
+		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+			/* Block CPU access */
+			WREG32(R600_BIF_FB_EN, 0);
+			/* blackout the MC */
+			blackout |= R600_BLACKOUT_MASK;
+			if (rdev->family >= CHIP_RV770)
+				WREG32(R700_MC_CITF_CNTL, blackout);
+			else
+				WREG32(R600_CITF_CNTL, blackout);
+		}
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->family >= CHIP_RV770) {
+			if (i == 0) {
+				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			} else {
+				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			}
+		}
+		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x3) != 0) {
+				tmp &= ~0x3;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		/* unblackout the MC */
+		if (rdev->family >= CHIP_RV770)
+			tmp = RREG32(R700_MC_CITF_CNTL);
+		else
+			tmp = RREG32(R600_CITF_CNTL);
+		tmp &= ~R600_BLACKOUT_MASK;
+		if (rdev->family >= CHIP_RV770)
+			WREG32(R700_MC_CITF_CNTL, tmp);
+		else
+			WREG32(R600_CITF_CNTL, tmp);
+		/* allow CPU access */
+		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
+	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void rv515_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rv515_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program MC; the address space should be limited to 32 bits */
+	WREG32_MC(R_000001_MC_FB_LOCATION,
+			S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000002_MC_AGP_LOCATION,
+			S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000004_MC_AGP_BASE_2,
+			S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000003_MC_AGP_BASE, 0);
+		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
+
+void rv515_clock_startup(struct radeon_device *rdev)
+{
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	/* We need to force some of the blocks on */
+	WREG32_PLL(R_00000F_CP_DYN_CNTL,
+		RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
+	WREG32_PLL(R_000011_E2_DYN_CNTL,
+		RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
+	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
+		RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
+}
+
+static int rv515_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rv515_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rv515_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rv515_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rv515_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	return 0;
+}
+
+void rv515_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
+}
+
+void rv515_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rv370_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+int rv515_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disabling VGA needs to use a VGA request */
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	rv515_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
+{
+	int index_reg = 0x6578 + crtc->crtc_offset;
+	int data_reg = 0x657c + crtc->crtc_offset;
+
+	WREG32(0x659C + crtc->crtc_offset, 0x0);
+	WREG32(0x6594 + crtc->crtc_offset, 0x705);
+	WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
+	WREG32(0x65D8 + crtc->crtc_offset, 0x0);
+	WREG32(0x65B0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65C0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65D4 + crtc->crtc_offset, 0x0);
+	WREG32(index_reg, 0x0);
+	WREG32(data_reg, 0x841880A8);
+	WREG32(index_reg, 0x1);
+	WREG32(data_reg, 0x84208680);
+	WREG32(index_reg, 0x2);
+	WREG32(data_reg, 0xBFF880B0);
+	WREG32(index_reg, 0x100);
+	WREG32(data_reg, 0x83D88088);
+	WREG32(index_reg, 0x101);
+	WREG32(data_reg, 0x84608680);
+	WREG32(index_reg, 0x102);
+	WREG32(data_reg, 0xBFF080D0);
+	WREG32(index_reg, 0x200);
+	WREG32(data_reg, 0x83988068);
+	WREG32(index_reg, 0x201);
+	WREG32(data_reg, 0x84A08680);
+	WREG32(index_reg, 0x202);
+	WREG32(data_reg, 0xBFF080F8);
+	WREG32(index_reg, 0x300);
+	WREG32(data_reg, 0x83588058);
+	WREG32(index_reg, 0x301);
+	WREG32(data_reg, 0x84E08660);
+	WREG32(index_reg, 0x302);
+	WREG32(data_reg, 0xBFF88120);
+	WREG32(index_reg, 0x400);
+	WREG32(data_reg, 0x83188040);
+	WREG32(index_reg, 0x401);
+	WREG32(data_reg, 0x85008660);
+	WREG32(index_reg, 0x402);
+	WREG32(data_reg, 0xBFF88150);
+	WREG32(index_reg, 0x500);
+	WREG32(data_reg, 0x82D88030);
+	WREG32(index_reg, 0x501);
+	WREG32(data_reg, 0x85408640);
+	WREG32(index_reg, 0x502);
+	WREG32(data_reg, 0xBFF88180);
+	WREG32(index_reg, 0x600);
+	WREG32(data_reg, 0x82A08018);
+	WREG32(index_reg, 0x601);
+	WREG32(data_reg, 0x85808620);
+	WREG32(index_reg, 0x602);
+	WREG32(data_reg, 0xBFF081B8);
+	WREG32(index_reg, 0x700);
+	WREG32(data_reg, 0x82608010);
+	WREG32(index_reg, 0x701);
+	WREG32(data_reg, 0x85A08600);
+	WREG32(index_reg, 0x702);
+	WREG32(data_reg, 0x800081F0);
+	WREG32(index_reg, 0x800);
+	WREG32(data_reg, 0x8228BFF8);
+	WREG32(index_reg, 0x801);
+	WREG32(data_reg, 0x85E085E0);
+	WREG32(index_reg, 0x802);
+	WREG32(data_reg, 0xBFF88228);
+	WREG32(index_reg, 0x10000);
+	WREG32(data_reg, 0x82A8BF00);
+	WREG32(index_reg, 0x10001);
+	WREG32(data_reg, 0x82A08CC0);
+	WREG32(index_reg, 0x10002);
+	WREG32(data_reg, 0x8008BEF8);
+	WREG32(index_reg, 0x10100);
+	WREG32(data_reg, 0x81F0BF28);
+	WREG32(index_reg, 0x10101);
+	WREG32(data_reg, 0x83608CA0);
+	WREG32(index_reg, 0x10102);
+	WREG32(data_reg, 0x8018BED0);
+	WREG32(index_reg, 0x10200);
+	WREG32(data_reg, 0x8148BF38);
+	WREG32(index_reg, 0x10201);
+	WREG32(data_reg, 0x84408C80);
+	WREG32(index_reg, 0x10202);
+	WREG32(data_reg, 0x8008BEB8);
+	WREG32(index_reg, 0x10300);
+	WREG32(data_reg, 0x80B0BF78);
+	WREG32(index_reg, 0x10301);
+	WREG32(data_reg, 0x85008C20);
+	WREG32(index_reg, 0x10302);
+	WREG32(data_reg, 0x8020BEA0);
+	WREG32(index_reg, 0x10400);
+	WREG32(data_reg, 0x8028BF90);
+	WREG32(index_reg, 0x10401);
+	WREG32(data_reg, 0x85E08BC0);
+	WREG32(index_reg, 0x10402);
+	WREG32(data_reg, 0x8018BE90);
+	WREG32(index_reg, 0x10500);
+	WREG32(data_reg, 0xBFB8BFB0);
+	WREG32(index_reg, 0x10501);
+	WREG32(data_reg, 0x86C08B40);
+	WREG32(index_reg, 0x10502);
+	WREG32(data_reg, 0x8010BE90);
+	WREG32(index_reg, 0x10600);
+	WREG32(data_reg, 0xBF58BFC8);
+	WREG32(index_reg, 0x10601);
+	WREG32(data_reg, 0x87A08AA0);
+	WREG32(index_reg, 0x10602);
+	WREG32(data_reg, 0x8010BE98);
+	WREG32(index_reg, 0x10700);
+	WREG32(data_reg, 0xBF10BFF0);
+	WREG32(index_reg, 0x10701);
+	WREG32(data_reg, 0x886089E0);
+	WREG32(index_reg, 0x10702);
+	WREG32(data_reg, 0x8018BEB0);
+	WREG32(index_reg, 0x10800);
+	WREG32(data_reg, 0xBED8BFE8);
+	WREG32(index_reg, 0x10801);
+	WREG32(data_reg, 0x89408940);
+	WREG32(index_reg, 0x10802);
+	WREG32(data_reg, 0xBFE8BED8);
+	WREG32(index_reg, 0x20000);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20001);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x20002);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20003);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20100);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20101);
+	WREG32(data_reg, 0x8FE0BF70);
+	WREG32(index_reg, 0x20102);
+	WREG32(data_reg, 0xBFE880C0);
+	WREG32(index_reg, 0x20103);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20200);
+	WREG32(data_reg, 0x8018BFF8);
+	WREG32(index_reg, 0x20201);
+	WREG32(data_reg, 0x8F80BF08);
+	WREG32(index_reg, 0x20202);
+	WREG32(data_reg, 0xBFD081A0);
+	WREG32(index_reg, 0x20203);
+	WREG32(data_reg, 0xBFF88000);
+	WREG32(index_reg, 0x20300);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20301);
+	WREG32(data_reg, 0x8EE0BEC0);
+	WREG32(index_reg, 0x20302);
+	WREG32(data_reg, 0xBFB082A0);
+	WREG32(index_reg, 0x20303);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20400);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20401);
+	WREG32(data_reg, 0x8E00BEA0);
+	WREG32(index_reg, 0x20402);
+	WREG32(data_reg, 0xBF8883C0);
+	WREG32(index_reg, 0x20403);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20500);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20501);
+	WREG32(data_reg, 0x8D00BE90);
+	WREG32(index_reg, 0x20502);
+	WREG32(data_reg, 0xBF588500);
+	WREG32(index_reg, 0x20503);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20600);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20601);
+	WREG32(data_reg, 0x8BC0BE98);
+	WREG32(index_reg, 0x20602);
+	WREG32(data_reg, 0xBF308660);
+	WREG32(index_reg, 0x20603);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20700);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20701);
+	WREG32(data_reg, 0x8A80BEB0);
+	WREG32(index_reg, 0x20702);
+	WREG32(data_reg, 0xBF0087C0);
+	WREG32(index_reg, 0x20703);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20800);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20801);
+	WREG32(data_reg, 0x8920BED0);
+	WREG32(index_reg, 0x20802);
+	WREG32(data_reg, 0xBED08920);
+	WREG32(index_reg, 0x20803);
+	WREG32(data_reg, 0x80008010);
+	WREG32(index_reg, 0x30000);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x30001);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x30100);
+	WREG32(data_reg, 0x8FE0BF90);
+	WREG32(index_reg, 0x30101);
+	WREG32(data_reg, 0xBFF880A0);
+	WREG32(index_reg, 0x30200);
+	WREG32(data_reg, 0x8F60BF40);
+	WREG32(index_reg, 0x30201);
+	WREG32(data_reg, 0xBFE88180);
+	WREG32(index_reg, 0x30300);
+	WREG32(data_reg, 0x8EC0BF00);
+	WREG32(index_reg, 0x30301);
+	WREG32(data_reg, 0xBFC88280);
+	WREG32(index_reg, 0x30400);
+	WREG32(data_reg, 0x8DE0BEE0);
+	WREG32(index_reg, 0x30401);
+	WREG32(data_reg, 0xBFA083A0);
+	WREG32(index_reg, 0x30500);
+	WREG32(data_reg, 0x8CE0BED0);
+	WREG32(index_reg, 0x30501);
+	WREG32(data_reg, 0xBF7884E0);
+	WREG32(index_reg, 0x30600);
+	WREG32(data_reg, 0x8BA0BED8);
+	WREG32(index_reg, 0x30601);
+	WREG32(data_reg, 0xBF508640);
+	WREG32(index_reg, 0x30700);
+	WREG32(data_reg, 0x8A60BEE8);
+	WREG32(index_reg, 0x30701);
+	WREG32(data_reg, 0xBF2087A0);
+	WREG32(index_reg, 0x30800);
+	WREG32(data_reg, 0x8900BF00);
+	WREG32(index_reg, 0x30801);
+	WREG32(data_reg, 0xBF008900);
+}
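+
+/*
+ * (The table above programs what is most likely a two-dimensional filter
+ * coefficient table through an indirect index/data register pair: each
+ * WREG32(index_reg, ...) selects a slot and the following
+ * WREG32(data_reg, ...) writes a packed pair of coefficients.  The exact
+ * hardware meaning of the values is not documented here.)
+ */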
+
+struct rv515_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
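+
+/*
+ * Note: fixed20_12 is the 20.12 fixed-point type from drm_fixed.h; the
+ * .full member holds the raw value, dfixed_const(x) is (x << 12),
+ * dfixed_trunc(x) recovers the integer part, and dfixed_mul()/dfixed_div()
+ * operate on whole fixed20_12 values.
+ */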
+
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+				  struct radeon_crtc *crtc,
+				  struct rv515_watermark *wm)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period (ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
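+	/* For example, a 148500 kHz (148.5 MHz) pixel clock gives
+	 * pclk = 1000 / 148.5 ~= 6.73 ns per pixel.
+	 */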
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
+
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period (ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock (MHz)
+	 */
+	a.full = dfixed_const(600 * 1000);
+	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+	read_delay_latency.full = dfixed_const(1000);
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request (1 = 2 lines, 2 = 4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
+	/* We assume a worst case of 32 bits (4 bytes) per pixel */
+	wm->dbpp.full = dfixed_const(2 * 16);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = wm->priority_mark_max.full;
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rv515_watermark wm0;
+	struct rv515_watermark wm1;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
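+	/* Pack the per-CRTC request FIFO depths into LB_MAX_REQ_OUTSTANDING:
+	 * D1 in the low bits, D2 starting at bit 16 (see the
+	 * LB_D1/D2_MAX_REQ_OUTSTANDING masks in rv515d.h).
+	 */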
+	tmp = wm0.lb_request_fifo_depth;
+	tmp |= wm1.lb_request_fifo_depth << 16;
+	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		else
+			b.full = wm1.num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	} else if (mode1) {
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		else
+			a.full = wm1.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1.sclk, a);
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	}
+
+	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
+void rv515_bandwidth_update(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for the displaypriority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    (rdev->family == CHIP_RV515)) {
+		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
+		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
+		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
+		if (mode1)
+			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode0)
+			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32_MC(MC_MISC_LAT_TIMER, tmp);
+	}
+	rv515_bandwidth_avivo_update(rdev);
+}


Property changes on: trunk/sys/dev/drm2/radeon/rv515.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rv515_reg_safe.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv515_reg_safe.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv515_reg_safe.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,61 @@
+/* $MidnightBSD$ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv515_reg_safe.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+static const unsigned rv515_reg_safe_bm[219] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFC6, 0xF00EBFFF, 0x007C0000,
+	0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x1FFFFC48, 0xFFFFE000, 0xFFFFFE1E, 0xFFFFFFFF,
+	0x388F8F50, 0xFFF88082, 0xFF0000FC, 0xFAE00BFF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0x00008CFC, 0xFFFCC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE80FFFF,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC0B, 0x3FFFFCFF, 0xFFBFFB99, 0xFFDFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+};


Property changes on: trunk/sys/dev/drm2/radeon/rv515_reg_safe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rv515d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv515d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv515d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,653 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV515D_H__
+#define __RV515D_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv515d.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/*
+ * RV515 registers
+ */
+#define PCIE_INDEX			0x0030
+#define PCIE_DATA			0x0034
+#define	MC_IND_INDEX			0x0070
+#define		MC_IND_WR_EN				(1 << 24)
+#define	MC_IND_DATA			0x0074
+#define	RBBM_SOFT_RESET			0x00F0
+#define	CONFIG_MEMSIZE			0x00F8
+#define HDP_FB_LOCATION			0x0134
+#define	CP_CSQ_CNTL			0x0740
+#define	CP_CSQ_MODE			0x0744
+#define	CP_CSQ_ADDR			0x07F0
+#define	CP_CSQ_DATA			0x07F4
+#define	CP_CSQ_STAT			0x07F8
+#define	CP_CSQ2_STAT			0x07FC
+#define	RBBM_STATUS			0x0E40
+#define	DST_PIPE_CONFIG			0x170C
+#define	WAIT_UNTIL			0x1720
+#define		WAIT_2D_IDLE				(1 << 14)
+#define		WAIT_3D_IDLE				(1 << 15)
+#define		WAIT_2D_IDLECLEAN			(1 << 16)
+#define		WAIT_3D_IDLECLEAN			(1 << 17)
+#define	ISYNC_CNTL			0x1724
+#define		ISYNC_ANY2D_IDLE3D			(1 << 0)
+#define		ISYNC_ANY3D_IDLE2D			(1 << 1)
+#define		ISYNC_TRIG2D_IDLE3D			(1 << 2)
+#define		ISYNC_TRIG3D_IDLE2D			(1 << 3)
+#define		ISYNC_WAIT_IDLEGUI			(1 << 4)
+#define		ISYNC_CPSCRATCH_IDLEGUI			(1 << 5)
+#define	VAP_INDEX_OFFSET		0x208C
+#define	VAP_PVS_STATE_FLUSH_REG		0x2284
+#define	GB_ENABLE			0x4008
+#define	GB_MSPOS0			0x4010
+#define		MS_X0_SHIFT				0
+#define		MS_Y0_SHIFT				4
+#define		MS_X1_SHIFT				8
+#define		MS_Y1_SHIFT				12
+#define		MS_X2_SHIFT				16
+#define		MS_Y2_SHIFT				20
+#define		MSBD0_Y_SHIFT				24
+#define		MSBD0_X_SHIFT				28
+#define	GB_MSPOS1			0x4014
+#define		MS_X3_SHIFT				0
+#define		MS_Y3_SHIFT				4
+#define		MS_X4_SHIFT				8
+#define		MS_Y4_SHIFT				12
+#define		MS_X5_SHIFT				16
+#define		MS_Y5_SHIFT				20
+#define		MSBD1_SHIFT				24
+#define GB_TILE_CONFIG			0x4018
+#define		ENABLE_TILING				(1 << 0)
+#define		PIPE_COUNT_MASK				0x0000000E
+#define		PIPE_COUNT_SHIFT			1
+#define		TILE_SIZE_8				(0 << 4)
+#define		TILE_SIZE_16				(1 << 4)
+#define		TILE_SIZE_32				(2 << 4)
+#define		SUBPIXEL_1_12				(0 << 16)
+#define		SUBPIXEL_1_16				(1 << 16)
+#define	GB_SELECT			0x401C
+#define	GB_AA_CONFIG			0x4020
+#define	GB_PIPE_SELECT			0x402C
+#define	GA_ENHANCE			0x4274
+#define		GA_DEADLOCK_CNTL			(1 << 0)
+#define		GA_FASTSYNC_CNTL			(1 << 1)
+#define	GA_POLY_MODE			0x4288
+#define		FRONT_PTYPE_POINT			(0 << 4)
+#define		FRONT_PTYPE_LINE			(1 << 4)
+#define		FRONT_PTYPE_TRIANGE			(2 << 4)
+#define		BACK_PTYPE_POINT			(0 << 7)
+#define		BACK_PTYPE_LINE				(1 << 7)
+#define		BACK_PTYPE_TRIANGE			(2 << 7)
+#define	GA_ROUND_MODE			0x428C
+#define		GEOMETRY_ROUND_TRUNC			(0 << 0)
+#define		GEOMETRY_ROUND_NEAREST			(1 << 0)
+#define		COLOR_ROUND_TRUNC			(0 << 2)
+#define		COLOR_ROUND_NEAREST			(1 << 2)
+#define	SU_REG_DEST			0x42C8
+#define	RB3D_DSTCACHE_CTLSTAT		0x4E4C
+#define		RB3D_DC_FLUSH				(2 << 0)
+#define		RB3D_DC_FREE				(2 << 2)
+#define		RB3D_DC_FINISH				(1 << 4)
+#define ZB_ZCACHE_CTLSTAT		0x4F18
+#define		ZC_FLUSH				(1 << 0)
+#define		ZC_FREE					(1 << 1)
+#define DC_LB_MEMORY_SPLIT		0x6520
+#define		DC_LB_MEMORY_SPLIT_MASK			0x00000003
+#define		DC_LB_MEMORY_SPLIT_SHIFT		0
+#define		DC_LB_MEMORY_SPLIT_D1HALF_D2HALF	0
+#define		DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q		1
+#define		DC_LB_MEMORY_SPLIT_D1_ONLY		2
+#define		DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q		3
+#define		DC_LB_MEMORY_SPLIT_SHIFT_MODE		(1 << 2)
+#define		DC_LB_DISP1_END_ADR_SHIFT		4
+#define		DC_LB_DISP1_END_ADR_MASK		0x00007FF0
+#define D1MODE_PRIORITY_A_CNT		0x6548
+#define		MODE_PRIORITY_MARK_MASK			0x00007FFF
+#define		MODE_PRIORITY_OFF			(1 << 16)
+#define		MODE_PRIORITY_ALWAYS_ON			(1 << 20)
+#define		MODE_PRIORITY_FORCE_MASK		(1 << 24)
+#define D1MODE_PRIORITY_B_CNT		0x654C
+#define LB_MAX_REQ_OUTSTANDING		0x6D58
+#define		LB_D1_MAX_REQ_OUTSTANDING_MASK		0x0000000F
+#define		LB_D1_MAX_REQ_OUTSTANDING_SHIFT		0
+#define		LB_D2_MAX_REQ_OUTSTANDING_MASK		0x000F0000
+#define		LB_D2_MAX_REQ_OUTSTANDING_SHIFT		16
+#define D2MODE_PRIORITY_A_CNT		0x6D48
+#define D2MODE_PRIORITY_B_CNT		0x6D4C
+
+/* ix[MC] registers */
+#define MC_FB_LOCATION			0x01
+#define		MC_FB_START_MASK			0x0000FFFF
+#define		MC_FB_START_SHIFT			0
+#define		MC_FB_TOP_MASK				0xFFFF0000
+#define		MC_FB_TOP_SHIFT				16
+#define MC_AGP_LOCATION			0x02
+#define		MC_AGP_START_MASK			0x0000FFFF
+#define		MC_AGP_START_SHIFT			0
+#define		MC_AGP_TOP_MASK				0xFFFF0000
+#define		MC_AGP_TOP_SHIFT			16
+#define MC_AGP_BASE			0x03
+#define MC_AGP_BASE_2			0x04
+#define	MC_CNTL				0x5
+#define		MEM_NUM_CHANNELS_MASK			0x00000003
+#define	MC_STATUS			0x08
+#define		MC_STATUS_IDLE				(1 << 4)
+#define	MC_MISC_LAT_TIMER		0x09
+#define		MC_CPR_INIT_LAT_MASK			0x0000000F
+#define		MC_VF_INIT_LAT_MASK			0x000000F0
+#define		MC_DISP0R_INIT_LAT_MASK			0x00000F00
+#define		MC_DISP0R_INIT_LAT_SHIFT		8
+#define		MC_DISP1R_INIT_LAT_MASK			0x0000F000
+#define		MC_DISP1R_INIT_LAT_SHIFT		12
+#define		MC_FIXED_INIT_LAT_MASK			0x000F0000
+#define		MC_E2R_INIT_LAT_MASK			0x00F00000
+#define		SAME_PAGE_PRIO_MASK			0x0F000000
+#define		MC_GLOBW_INIT_LAT_MASK			0xF0000000
+
+
+/*
+ * PM4 packet
+ */
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
+
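+/*
+ * Illustrative use (the radeon_ring_write() helper is assumed from the
+ * rest of the driver, not defined in this header): a type-0 packet that
+ * writes n+1 registers starting at 'reg' is a PACKET0(reg, n) header
+ * followed by the data words, e.g.
+ *
+ *	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ *	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ */
+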
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
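+
+/*
+ * To decode a PM4 header word h: CP_PACKET_GET_TYPE(h) yields
+ * PACKET_TYPE0..PACKET_TYPE3; for type 0, CP_PACKET0_GET_REG(h) gives the
+ * base register and for type 3, CP_PACKET3_GET_OPCODE(h) the opcode, while
+ * CP_PACKET_GET_COUNT(h) is the number of payload dwords minus one.
+ */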
+
+/* Registers */
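+/*
+ * Naming convention used below: for each field FOO of register R_xxxxxx,
+ * S_xxxxxx_FOO(x) shifts a value into the field, G_xxxxxx_FOO(x) extracts
+ * it, and C_xxxxxx_FOO is the AND-mask that clears it.  A typical
+ * read-modify-write (illustrative) is:
+ *
+ *	tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);
+ *	tmp &= C_0000F0_SOFT_RESET_GA;
+ *	tmp |= S_0000F0_SOFT_RESET_GA(1);
+ *	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
+ */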
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_000300_VGA_RENDER_CONTROL                  0x000300
+#define   S_000300_VGA_BLINK_RATE(x)                   (((x) & 0x1F) << 0)
+#define   G_000300_VGA_BLINK_RATE(x)                   (((x) >> 0) & 0x1F)
+#define   C_000300_VGA_BLINK_RATE                      0xFFFFFFE0
+#define   S_000300_VGA_BLINK_MODE(x)                   (((x) & 0x3) << 5)
+#define   G_000300_VGA_BLINK_MODE(x)                   (((x) >> 5) & 0x3)
+#define   C_000300_VGA_BLINK_MODE                      0xFFFFFF9F
+#define   S_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) & 0x1) << 7)
+#define   G_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) >> 7) & 0x1)
+#define   C_000300_VGA_CURSOR_BLINK_INVERT             0xFFFFFF7F
+#define   S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) & 0x1) << 8)
+#define   G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) >> 8) & 0x1)
+#define   C_000300_VGA_EXTD_ADDR_COUNT_ENABLE          0xFFFFFEFF
+#define   S_000300_VGA_VSTATUS_CNTL(x)                 (((x) & 0x3) << 16)
+#define   G_000300_VGA_VSTATUS_CNTL(x)                 (((x) >> 16) & 0x3)
+#define   C_000300_VGA_VSTATUS_CNTL                    0xFFFCFFFF
+#define   S_000300_VGA_LOCK_8DOT(x)                    (((x) & 0x1) << 24)
+#define   G_000300_VGA_LOCK_8DOT(x)                    (((x) >> 24) & 0x1)
+#define   C_000300_VGA_LOCK_8DOT                       0xFEFFFFFF
+#define   S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
+#define   G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
+#define   C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL    0xFDFFFFFF
+#define R_000310_VGA_MEMORY_BASE_ADDRESS             0x000310
+#define   S_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000310_VGA_MEMORY_BASE_ADDRESS             0x00000000
+#define R_000328_VGA_HDP_CONTROL                     0x000328
+#define   S_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) & 0x1) << 0)
+#define   G_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) >> 0) & 0x1)
+#define   C_000328_VGA_MEM_PAGE_SELECT_EN              0xFFFFFFFE
+#define   S_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) & 0x1) << 8)
+#define   G_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) >> 8) & 0x1)
+#define   C_000328_VGA_RBBM_LOCK_DISABLE               0xFFFFFEFF
+#define   S_000328_VGA_SOFT_RESET(x)                   (((x) & 0x1) << 16)
+#define   G_000328_VGA_SOFT_RESET(x)                   (((x) >> 16) & 0x1)
+#define   C_000328_VGA_SOFT_RESET                      0xFFFEFFFF
+#define   S_000328_VGA_TEST_RESET_CONTROL(x)           (((x) & 0x1) << 24)
+#define   G_000328_VGA_TEST_RESET_CONTROL(x)           (((x) >> 24) & 0x1)
+#define   C_000328_VGA_TEST_RESET_CONTROL              0xFEFFFFFF
+#define R_000330_D1VGA_CONTROL                       0x000330
+#define   S_000330_D1VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000330_D1VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000330_D1VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000330_D1VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000330_D1VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000330_D1VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000330_D1VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000330_D1VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000330_D1VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000330_D1VGA_ROTATE                        0xFCFFFFFF
+#define R_000338_D2VGA_CONTROL                       0x000338
+#define   S_000338_D2VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000338_D2VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000338_D2VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000338_D2VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000338_D2VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000338_D2VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000338_D2VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000338_D2VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000338_D2VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000338_D2VGA_ROTATE                        0xFCFFFFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006080_D1CRTC_CONTROL                      0x006080
+#define   S_006080_D1CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006080_D1CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006080_D1CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006080_D1CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006080_D1CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006080_D1CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0060E8_D1CRTC_UPDATE_LOCK                  0x0060E8
+#define   S_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0060E8_D1CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x006110
+#define   S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x006118
+#define   S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+#define R_006880_D2CRTC_CONTROL                      0x006880
+#define   S_006880_D2CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006880_D2CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006880_D2CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006880_D2CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006880_D2CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006880_D2CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0068E8_D2CRTC_UPDATE_LOCK                  0x0068E8
+#define   S_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0068E8_D2CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x006910
+#define   S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x006918
+#define   S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+
+
+#define R_000001_MC_FB_LOCATION                      0x000001
+#define   S_000001_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000001_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000001_MC_FB_START                         0xFFFF0000
+#define   S_000001_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000001_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000001_MC_FB_TOP                           0x0000FFFF
+#define R_000002_MC_AGP_LOCATION                     0x000002
+#define   S_000002_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000002_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000002_MC_AGP_START                        0xFFFF0000
+#define   S_000002_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000002_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000002_MC_AGP_TOP                          0x0000FFFF
+#define R_000003_MC_AGP_BASE                         0x000003
+#define   S_000003_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000003_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000003_AGP_BASE_ADDR                       0x00000000
+#define R_000004_MC_AGP_BASE_2                       0x000004
+#define   S_000004_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000004_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000004_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+
+#define R_00000F_CP_DYN_CNTL                         0x00000F
+#define   S_00000F_CP_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_00000F_CP_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_00000F_CP_FORCEON                          0xFFFFFFFE
+#define   S_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_00000F_CP_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_00000F_CP_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_00000F_CP_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_00000F_CP_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_00000F_CP_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_00000F_CP_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_00000F_CP_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_00000F_CP_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_00000F_CP_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_00000F_CP_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_00000F_CP_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_00000F_CP_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_00000F_CP_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_00000F_CP_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_00000F_CP_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_00000F_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_00000F_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_00000F_SPARE                               0xFF3FFFFF
+#define   S_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_00000F_CP_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000011_E2_DYN_CNTL                         0x000011
+#define   S_000011_E2_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_000011_E2_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_000011_E2_FORCEON                          0xFFFFFFFE
+#define   S_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_000011_E2_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_000011_E2_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_000011_E2_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_000011_E2_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_000011_E2_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_000011_E2_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_000011_E2_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_000011_E2_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_000011_E2_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_000011_E2_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_000011_E2_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_000011_E2_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_000011_E2_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_000011_E2_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_000011_E2_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_000011_E2_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_000011_E2_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_000011_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000011_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000011_SPARE                               0xFF3FFFFF
+#define   S_000011_E2_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_000011_E2_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_000011_E2_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000013_IDCT_DYN_CNTL                       0x000013
+#define   S_000013_IDCT_FORCEON(x)                     (((x) & 0x1) << 0)
+#define   G_000013_IDCT_FORCEON(x)                     (((x) >> 0) & 0x1)
+#define   C_000013_IDCT_FORCEON                        0xFFFFFFFE
+#define   S_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 1)
+#define   G_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 1) & 0x1)
+#define   C_000013_IDCT_MAX_DYN_STOP_LAT               0xFFFFFFFD
+#define   S_000013_IDCT_CLOCK_STATUS(x)                (((x) & 0x1) << 2)
+#define   G_000013_IDCT_CLOCK_STATUS(x)                (((x) >> 2) & 0x1)
+#define   C_000013_IDCT_CLOCK_STATUS                   0xFFFFFFFB
+#define   S_000013_IDCT_PROG_SHUTOFF(x)                (((x) & 0x1) << 3)
+#define   G_000013_IDCT_PROG_SHUTOFF(x)                (((x) >> 3) & 0x1)
+#define   C_000013_IDCT_PROG_SHUTOFF                   0xFFFFFFF7
+#define   S_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) & 0xFF) << 4)
+#define   G_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) >> 4) & 0xFF)
+#define   C_000013_IDCT_PROG_DELAY_VALUE               0xFFFFF00F
+#define   S_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) & 0xFF) << 12)
+#define   G_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) >> 12) & 0xFF)
+#define   C_000013_IDCT_LOWER_POWER_IDLE               0xFFF00FFF
+#define   S_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) & 0x1) << 20)
+#define   G_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) >> 20) & 0x1)
+#define   C_000013_IDCT_LOWER_POWER_IGNORE             0xFFEFFFFF
+#define   S_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) & 0x1) << 21)
+#define   G_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) >> 21) & 0x1)
+#define   C_000013_IDCT_NORMAL_POWER_IGNORE            0xFFDFFFFF
+#define   S_000013_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000013_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000013_SPARE                               0xFF3FFFFF
+#define   S_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) & 0xFF) << 24)
+#define   G_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) >> 24) & 0xFF)
+#define   C_000013_IDCT_NORMAL_POWER_BUSY              0x00FFFFFF
+
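+/* The S_/G_/C_ macro triplets above follow a single pattern: S_ shifts a
+ * field value into position, G_ extracts it, and C_ is the AND-mask that
+ * clears it.  A minimal read-modify-write sketch forcing the IDCT clock
+ * on (hypothetical snippet, assuming the driver's RREG32_PLL/WREG32_PLL
+ * accessors for this register space):
+ *
+ *	u32 reg = RREG32_PLL(R_000013_IDCT_DYN_CNTL);
+ *	reg &= C_000013_IDCT_FORCEON;
+ *	reg |= S_000013_IDCT_FORCEON(1);
+ *	WREG32_PLL(R_000013_IDCT_DYN_CNTL, reg);
+ *
+ * G_000013_IDCT_FORCEON(reg) would then read the field back as 1.
+ */
+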
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rv515d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rv770.c
===================================================================
--- trunk/sys/dev/drm2/radeon/rv770.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv770.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1307 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv770.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+static void rv770_gpu_init(struct radeon_device *rdev);
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+void rv770_fini(struct radeon_device *rdev);
+#endif
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
+/* get temperature in millidegrees */
+int rv770_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp;
+
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
+
+	return (actual_temp * 1000) / 2;
+}
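+
+/* Worked example of the decode above (illustrative): a raw ASIC_T field
+ * of 0x1f6 takes the sign-extension branch, since bit 0x100 is set while
+ * bits 0x400 and 0x200 are clear; 0x1f6 | ~0x1ff is -10, so the function
+ * returns -10 * 1000 / 2 = -5000 millidegrees.  A raw value with bit
+ * 0x200 set clamps to 255, i.e. 127500 millidegrees.
+ */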
+
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+/*
+ * GART
+ */
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	if (rdev->family == CHIP_RV740)
+		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv770_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+
+static void rv770_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
+	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+	 */
+	tmp = RREG32(HDP_DEBUG1);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lock out access through the VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it from overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
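+
+/* Worked example of the MC_VM_FB_LOCATION packing above (illustrative):
+ * with 512MB of VRAM at GPU address 0, vram_start is 0x00000000 and
+ * vram_end is 0x1FFFFFFF.  Both bounds are expressed in 16MB units
+ * (>> 24) and packed end-high/start-low, giving
+ * tmp = (0x1F << 16) | 0x00 = 0x001F0000.
+ */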
+
+
+/*
+ * CP.
+ */
+void r700_cp_stop(struct radeon_device *rdev)
+{
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+static int rv770_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+void r700_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r700_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * Core functions
+ */
+static void rv770_gpu_init(struct radeon_device *rdev)
+{
+	int i, j, num_qd_pipes;
+	u32 ta_aux_cntl;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 db_debug3;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_thread_resource_mgmt;
+	u32 hdp_host_path_cntl;
+	u32 sq_dyn_gpr_size_simd_ab_0;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable = 0;
+	u32 cc_gc_shader_pipe_config = 0;
+	u32 mc_arb_ramcfg;
+	u32 db_debug4, tmp;
+	u32 inactive_pipes, shader_pipe_config;
+	u32 disabled_rb_mask;
+	unsigned active_number;
+
+	/* setup chip specs */
+	rdev->config.rv770.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_RV770:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 8;
+		rdev->config.rv770.max_simds = 10;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV730:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 2;
+		rdev->config.rv770.max_gprs = 128;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	case CHIP_RV710:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 2;
+		rdev->config.rv770.max_simds = 2;
+		rdev->config.rv770.max_backends = 1;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 192;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 4;
+		rdev->config.rv770.max_gs_threads = 8 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 1;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x40;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV740:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x100;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+		if (!(inactive_pipes & tmp)) {
+			active_number++;
+		}
+		tmp <<= 1;
+	}
+	if (active_number == 1) {
+		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+	} else {
+		WREG32(SPI_CONFIG_CNTL, 0);
+	}
+
+	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+	if (tmp < rdev->config.rv770.max_backends) {
+		rdev->config.rv770.max_backends = tmp;
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.rv770.max_pipes) {
+		rdev->config.rv770.max_pipes = tmp;
+	}
+	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.rv770.max_simds) {
+		rdev->config.rv770.max_simds = tmp;
+	}
+
+	switch (rdev->config.rv770.max_tile_pipes) {
+	case 1:
+	default:
+		gb_tiling_config = PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config = PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config = PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config = PIPE_TILING(3);
+		break;
+	}
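+	/* Illustrative: the field encodes log2(pipes) in bits [3:1] of
+	 * gb_tiling_config, so the 8-pipe case above packs PIPE_TILING(3). */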
+	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+					R7XX_MAX_BACKENDS, disabled_rb_mask);
+	gb_tiling_config |= tmp << 16;
+	rdev->config.rv770.backend_map = tmp;
+
+	if (rdev->family == CHIP_RV770)
+		gb_tiling_config |= BANK_TILING(1);
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			gb_tiling_config |= BANK_TILING(1);
+		else
+			gb_tiling_config |= BANK_TILING(0);
+	}
+	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
+	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
+		gb_tiling_config |= ROW_TILING(3);
+		gb_tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+		gb_tiling_config |=
+			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+	}
+
+	gb_tiling_config |= BANK_SWAPS(1);
+	rdev->config.rv770.tile_config = gb_tiling_config;
+
+	WREG32(GB_TILING_CONFIG, gb_tiling_config);
+	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+
+	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	ta_aux_cntl = RREG32(TA_CNTL_AUX);
+	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
+	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family != CHIP_RV740)
+		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+				       GS_FLUSH_CTL(4) |
+				       ACK_FLUSH_CTL(3) |
+				       SYNC_FLUSH_CTL));
+
+	if (rdev->family != CHIP_RV770)
+		WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
+	db_debug3 = RREG32(DB_DEBUG3);
+	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV740:
+		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+		break;
+	case CHIP_RV710:
+	case CHIP_RV730:
+	default:
+		db_debug3 |= DB_CLK_OFF_DELAY(2);
+		break;
+	}
+	WREG32(DB_DEBUG3, db_debug3);
+
+	if (rdev->family != CHIP_RV770) {
+		db_debug4 = RREG32(DB_DEBUG4);
+		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
+		WREG32(DB_DEBUG4, db_debug4);
+	}
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
+			    DONE_FIFO_HIWATER(0xe0) |
+			    ALU_UPDATE_FIFO_HIWATER(0x8));
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+		break;
+	case CHIP_RV740:
+	default:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
+		break;
+	}
+	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+	if (rdev->family == CHIP_RV710)
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+	WREG32(SQ_CONFIG, sq_config);
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
+					 NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
+
+	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
+				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
+				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
+	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
+		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
+	else
+		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
+
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if (rdev->family == CHIP_RV710)
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+	else
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		gs_prim_buffer_depth = 384;
+		break;
+	case CHIP_RV710:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
+	WREG32(VGT_GS_PER_VS, 2);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+
+	WREG32(TCP_CNTL, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+					  NUM_CLIP_SEQ(3)));
+	WREG32(VC_ENHANCE, 0);
+}
+
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n",
+				(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
+				(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
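+
+/* Illustrative placement for the AGP branch above: with a 512MB GTT at
+ * 0xA0000000 - 0xBFFFFFFF, size_bf is 0xA0000000 and size_af is
+ * 0x40000000, so 1GB of VRAM is placed just below the GTT at
+ * 0x60000000 - 0x9FFFFFFF.
+ */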
+
+static int rv770_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
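+	/* e.g. a NOOFCHAN field of 3 (8 channels) with a 32-bit channel
+	 * size yields a 256-bit memory interface (illustrative). */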
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
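+
+/* Minimal caller sketch for rv770_copy_dma() (hypothetical, with
+ * src_gpu_addr/dst_gpu_addr standing in for real GPU addresses): copy 16
+ * GPU pages and block until the fence signals.
+ *
+ *	struct radeon_fence *fence = NULL;
+ *	int r = rv770_copy_dma(rdev, src_gpu_addr, dst_gpu_addr, 16, &fence);
+ *	if (r == 0) {
+ *		(void)radeon_fence_wait(fence, false);
+ *		radeon_fence_unref(&fence);
+ *	}
+ */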
+
+static int rv770_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	rv770_mc_program(rdev);
+	if (rdev->flags & RADEON_IS_AGP) {
+		rv770_agp_enable(rdev);
+	} else {
+		r = rv770_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	rv770_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = rv770_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rv770_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
+	 * posting performs the tasks necessary to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		DRM_ERROR("rv770 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int rv770_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	rv770_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and to use
+ * helper functions so that radeon_device_init does little more than
+ * call asic-specific functions.  This should also make it possible to
+ * remove a bunch of callbacks such as vram_info.
+ */
+int rv770_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted, posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = rv770_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv770_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the ucode is missing. */
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		DRM_ERROR("radeon: ucode required for R600+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void rv770_fini(struct radeon_device *rdev)
+{
+	r600_blit_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	rv770_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	r600_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+	u32 mask;
+	int ret;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & DRM_PCIE_SPEED_50))
+		return;
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}


Property changes on: trunk/sys/dev/drm2/radeon/rv770.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/rv770d.h
===================================================================
--- trunk/sys/dev/drm2/radeon/rv770d.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/rv770d.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,674 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef RV770_H
+#define RV770_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/rv770d.h 261455 2014-02-04 03:36:42Z eadler $");
+
+#define R7XX_MAX_SH_GPRS           256
+#define R7XX_MAX_TEMP_GPRS         16
+#define R7XX_MAX_SH_THREADS        256
+#define R7XX_MAX_SH_STACK_ENTRIES  4096
+#define R7XX_MAX_BACKENDS          8
+#define R7XX_MAX_BACKENDS_MASK     0xff
+#define R7XX_MAX_SIMDS             16
+#define R7XX_MAX_SIMDS_MASK        0xffff
+#define R7XX_MAX_PIPES             8
+#define R7XX_MAX_PIPES_MASK        0xff
+
+/* Registers */
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1<<28)
+#define		CP_PFP_HALT					(1<<26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1U << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG3					0x98B0
+#define		DB_CLK_OFF_DELAY(x)				((x) << 11)
+#define DB_DEBUG4					0x9B8C
+#define		DISABLE_TILE_COVERED_FOR_PS_ITER		(1 << 6)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define 	BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT			    8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		GUI_ACTIVE					(1<<31)
+#define	GRBM_STATUS2					0x8014
+
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x3FF0000
+#define		ASIC_T_SHIFT			        16
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x)<<0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x)<<16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		CACHE_DEPTH(x)					((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		DX9_CONSTS					(1 << 2)
+#define		ALU_INST_PREFER_VECTOR				(1 << 3)
+#define		DX10_CLAMP					(1 << 4)
+#define		CLAUSE_SEQ_PRIO(x)				((x) << 8)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_0			0x8DB0
+#define		SIMDA_RING0(x)					((x)<<0)
+#define		SIMDA_RING1(x)					((x)<<8)
+#define		SIMDB_RING0(x)					((x)<<16)
+#define		SIMDB_RING1(x)					((x)<<24)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_1			0x8DB4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_2			0x8DB8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_3			0x8DBC
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_4			0x8DC0
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_5			0x8DC4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_6			0x8DC8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_7			0x8DCC
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		DYN_GPR_ENABLE					(1 << 27)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C10
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C14
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C0C
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MISC						0x28350
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1U << 31)
+
+#define	TCP_CNTL					0x9610
+#define	TCP_CHAN_STEER					0x9614
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
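+
+/* Illustrative encoding: the header rv770_copy_dma() emits for a
+ * 0x1000-dword copy is DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0x1000) =
+ * (0x3 << 28) | 0x1000 = 0x30001000.
+ */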
+
+
+#define	SRBM_STATUS				        0x0E50
+
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL                         0x7400
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#define HDMI_STATUS                          0x7404
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7408
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x740c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL              0x7410
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7414
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7418
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x741c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7428
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x742c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7454
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7458
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x745c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7460
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7464
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7468
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x746c
+#define AFMT_GENERIC0_0                      0x7470
+#define AFMT_GENERIC0_1                      0x7474
+#define AFMT_GENERIC0_2                      0x7478
+#define AFMT_GENERIC0_3                      0x747c
+#define AFMT_GENERIC0_4                      0x7480
+#define AFMT_GENERIC0_5                      0x7484
+#define AFMT_GENERIC0_6                      0x7488
+#define AFMT_GENERIC1_HDR                    0x748c
+#define AFMT_GENERIC1_0                      0x7490
+#define AFMT_GENERIC1_1                      0x7494
+#define AFMT_GENERIC1_2                      0x7498
+#define AFMT_GENERIC1_3                      0x749c
+#define AFMT_GENERIC1_4                      0x74a0
+#define AFMT_GENERIC1_5                      0x74a4
+#define AFMT_GENERIC1_6                      0x74a8
+#define HDMI_ACR_32_0                        0x74ac
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x74b0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x74b4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x74b8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x74bc
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x74c0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0                    0x74c4
+#define HDMI_ACR_STATUS_1                    0x74c8
+#define AFMT_AUDIO_INFO0                     0x74cc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1                     0x74d0
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0                         0x74d4
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x74d8
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x74dc
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x74e0
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1U << 31)
+#define AFMT_RAMP_CONTROL1                   0x74e4
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x74e8
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x74ec
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x74f0
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7608
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x760c
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0                      (0x7400 - 0x7400)
+#define HDMI_OFFSET1                      (0x7800 - 0x7400)
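+/*
+ * Illustrative usage (assumed, following the usual radeon block-offset
+ * pattern): second-instance registers are reached by adding the instance
+ * offset, e.g. HDMI_CONTROL + HDMI_OFFSET1 addresses 0x7800, while
+ * HDMI_OFFSET0 leaves the base address unchanged.
+ */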
+
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
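+/*
+ * Illustrative encoding (example values only): an 8-channel LPCM
+ * descriptor supporting 32/44.1/48 kHz would be
+ *   MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07)
+ * which evaluates to 0x00000707.
+ */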
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1U << 31)
+
+
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
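+
+/*
+ * Illustrative read (assuming the RREG32_PCIE_P port accessor used
+ * elsewhere in radeon): the negotiated link width is
+ *   (RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK) >>
+ *       LC_LINK_WIDTH_RD_SHIFT
+ * which yields one of the LC_LINK_WIDTH_X* codes (6 == x16).
+ */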
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/rv770d.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/si.c
===================================================================
--- trunk/sys/dev/drm2/radeon/si.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/si.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,4454 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/si.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <dev/drm2/radeon/radeon_drm.h>
+#include "sid.h"
+#include "atom.h"
+#include "si_blit_shaders.h"
+
+#define SI_PFP_UCODE_SIZE 2144
+#define SI_PM4_UCODE_SIZE 2144
+#define SI_CE_UCODE_SIZE 2144
+#define SI_RLC_UCODE_SIZE 2048
+#define SI_MC_UCODE_SIZE 7769
+
+#ifdef __linux__
+MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
+MODULE_FIRMWARE("radeon/TAHITI_me.bin");
+MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
+MODULE_FIRMWARE("radeon/VERDE_me.bin");
+MODULE_FIRMWARE("radeon/VERDE_ce.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+#endif
+
+#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+#endif
+
+/* get temperature in millidegrees */
+int si_get_temp(struct radeon_device *rdev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = (actual_temp * 1000);
+
+	return actual_temp;
+}
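+
+/*
+ * Worked example (illustrative): a raw CTF_TEMP field of 0x31 (49) has the
+ * 0x200 overflow bit clear, so si_get_temp() returns 49 * 1000 = 49000
+ * millidegrees, i.e. 49 C.
+ */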
+
+#define TAHITI_IO_MC_REGS_SIZE 36
+
+static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a77400}
+};
+
+static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a47400}
+};
+
+static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a37400}
+};
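+
+/*
+ * The three io_mc_regs tables above differ only in their final entry,
+ * register 0x9f: 0x00a77400 (Tahiti), 0x00a47400 (Pitcairn) and
+ * 0x00a37400 (Verde).
+ */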
+
+/* ucode loading */
+static int si_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 running, blackout = 0;
+	const u32 *io_mc_regs;
+	int i, ucode_size, regs_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		io_mc_regs = &tahiti_io_mc_regs[0][0];
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_PITCAIRN:
+		io_mc_regs = &pitcairn_io_mc_regs[0][0];
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_VERDE:
+	default:
+		io_mc_regs = &verde_io_mc_regs[0][0];
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	}
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
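+		/*
+		 * Note: running == 0 on this path, so the blackout save
+		 * below (and the matching restore at the end of this
+		 * block) can never execute.
+		 */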
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+static int si_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		chip_name = "TAHITI";
+		rlc_chip_name = "TAHITI";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "PITCAIRN";
+		rlc_chip_name = "PITCAIRN";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_VERDE:
+		chip_name = "VERDE";
+		rlc_chip_name = "VERDE";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	default: panic("%s: Unsupported family %d", __func__, rdev->family);
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+	err = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
+	rdev->pfp_fw = firmware_get(fw_name);
+	if (rdev->pfp_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->pfp_fw->datasize != pfp_req_size) {
+		DRM_ERROR(
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->datasize, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
+	rdev->me_fw = firmware_get(fw_name);
+	if (rdev->me_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->me_fw->datasize != me_req_size) {
+		DRM_ERROR(
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_ce", chip_name);
+	rdev->ce_fw = firmware_get(fw_name);
+	if (rdev->ce_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->ce_fw->datasize != ce_req_size) {
+		DRM_ERROR(
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->ce_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
+	rdev->rlc_fw = firmware_get(fw_name);
+	if (rdev->rlc_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->rlc_fw->datasize != rlc_req_size) {
+		DRM_ERROR(
+		       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", chip_name);
+	rdev->mc_fw = firmware_get(fw_name);
+	if (rdev->mc_fw == NULL) {
+		err = -ENOENT;
+		goto out;
+	}
+	if (rdev->mc_fw->datasize != mc_req_size) {
+		DRM_ERROR(
+		       "si_mc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->mc_fw->datasize, fw_name);
+		err = -EINVAL;
+	}
+
+out:
+	if (err) {
+		if (err != -EINVAL)
+			DRM_ERROR(
+			       "si_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		if (rdev->pfp_fw != NULL) {
+			firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+			rdev->pfp_fw = NULL;
+		}
+		if (rdev->me_fw != NULL) {
+			firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+			rdev->me_fw = NULL;
+		}
+		if (rdev->ce_fw != NULL) {
+			firmware_put(rdev->ce_fw, FIRMWARE_UNLOAD);
+			rdev->ce_fw = NULL;
+		}
+		if (rdev->rlc_fw != NULL) {
+			firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+			rdev->rlc_fw = NULL;
+		}
+		if (rdev->mc_fw != NULL) {
+			firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD);
+			rdev->mc_fw = NULL;
+		}
+	}
+	return err;
+}
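+
+/*
+ * Usage note (illustrative): on FreeBSD the images are resolved by name
+ * through firmware_get(9), so CHIP_TAHITI, for example, loads
+ * radeonkmsfw_TAHITI_pfp, _me, _ce, _rlc and _mc, i.e. the same blobs the
+ * Linux MODULE_FIRMWARE() lines above refer to.
+ */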
+
+/**
+ * si_fini_microcode - drop the firmware image references
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Drop the pfp, me, rlc, mc and ce firmware image references.
+ * Called at driver shutdown.
+ */
+static void si_fini_microcode(struct radeon_device *rdev)
+{
+
+	if (rdev->pfp_fw != NULL) {
+		firmware_put(rdev->pfp_fw, FIRMWARE_UNLOAD);
+		rdev->pfp_fw = NULL;
+	}
+
+	if (rdev->me_fw != NULL) {
+		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
+		rdev->me_fw = NULL;
+	}
+
+	if (rdev->rlc_fw != NULL) {
+		firmware_put(rdev->rlc_fw, FIRMWARE_UNLOAD);
+		rdev->rlc_fw = NULL;
+	}
+
+	if (rdev->mc_fw != NULL) {
+		firmware_put(rdev->mc_fw, FIRMWARE_UNLOAD);
+		rdev->mc_fw = NULL;
+	}
+
+	if (rdev->ce_fw != NULL) {
+		firmware_put(rdev->ce_fw, FIRMWARE_UNLOAD);
+		rdev->ce_fw = NULL;
+	}
+}
+
+/* watermark setup */
+static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
+				   struct radeon_crtc *radeon_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *other_mode)
+{
+	u32 tmp;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20:
+	 *  0 - half lb
+	 *  2 - whole lb, other crtc must be disabled
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode)
+			tmp = 0; /* 1/2 */
+		else
+			tmp = 2; /* whole */
+	} else
+		tmp = 0;
+
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
+	       DC_LB_MEMORY_CONFIG(tmp));
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 2:
+			return 8192 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce6_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
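+
+/*
+ * Worked example (illustrative numbers): with yclk = 500000 (a 500 MHz
+ * effective clock per pin) and dram_channels = 2, this computes
+ *   (500000 / 1000) * (2 * 4) * 0.7 = 2800
+ * in the driver's shared bandwidth units.
+ */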
+
+static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
+{
+	return 32;
+}
+
+static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, sclk, bandwidth;
+	fixed20_12 a, b1, b2;
+	u32 min_bandwidth;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
+	b1.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
+	b2.full = dfixed_mul(a, sclk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
+
+	a.full = dfixed_const(min_bandwidth);
+	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
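+
+/*
+ * Worked example (illustrative): for a 1080p-class mode with
+ * active_time + blank_time = 14815 ns, src_width = 1920, 4 bytes per
+ * pixel and vsc = 1, this returns roughly (1920 * 4) / 14.815 ~= 518,
+ * in the same units as the dram/available bandwidth helpers.
+ */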
+
+static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce6_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(mc_latency + 512);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(b, c);
+
+	c.full = dfixed_const(dmif_size);
+	b.full = dfixed_div(c, b);
+
+	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (dce6_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void dce6_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct dce6_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		if (rdev->family == CHIP_ARUBA)
+			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		else
+			wm.dram_channels = si_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !dce6_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
+
+void dce6_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
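+
+/*
+ * Note: crtcs are walked in pairs (i, i + 1) because each line buffer is
+ * shared by two display controllers; a head's allocation from
+ * dce6_line_buffer_adjust() therefore depends on whether its partner is
+ * also driving a mode.
+ */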
+
+/*
+ * Core functions
+ */
+static void si_tiling_mode_table_init(struct radeon_device *rdev)
+{
+	const u32 num_tile_mode_states = 32;
+	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	if ((rdev->family == CHIP_TAHITI) ||
+	    (rdev->family == CHIP_PITCAIRN)) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+	} else if (rdev->family == CHIP_VERDE) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+	} else
+		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+}
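+
+/*
+ * GB_TILE_MODE entries are indexed by the tile-mode field of a surface
+ * descriptor; indices a table skips (18-20 in the CHIP_VERDE block above,
+ * for instance) fall through to the default case and are programmed to 0
+ * rather than left at their reset values.
+ */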
+
+static void si_select_se_sh(struct radeon_device *rdev,
+			    u32 se_num, u32 sh_num)
+{
+	u32 data = INSTANCE_BROADCAST_WRITES;
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
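+
+/*
+ * Usage sketch: si_select_se_sh(rdev, 0, 1) steers subsequent per-instance
+ * register accesses at shader engine 0, shader array 1, while 0xffffffff in
+ * either argument broadcasts to every instance at that level; the callers
+ * below always finish by restoring full broadcast mode.
+ */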
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+	u32 i, mask = 0;
+
+	for (i = 0; i < bit_width; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+	return mask;
+}
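+
+/*
+ * For example, si_create_bitmask(5) returns 0x1f.  The loop form is
+ * equivalent to ((1u << bit_width) - 1) for widths below 32, but also
+ * yields 0xffffffff for bit_width == 32 without the undefined full-width
+ * shift.
+ */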
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	if (data & 1)
+		data &= INACTIVE_CUS_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = si_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
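+
+/*
+ * The return value is a bitmask of the *active* compute units in the
+ * currently selected shader array.  E.g. with cu_per_sh = 5 and CUs 0 and 3
+ * fused off, the combined INACTIVE_CUS field reads 0x09, so this returns
+ * ~0x09 & 0x1f = 0x16 (CUs 1, 2 and 4 usable).
+ */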
+
+static void si_setup_spi(struct radeon_device *rdev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask, active_cu;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+			      u32 max_rb_num, u32 se_num,
+			      u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	if (data & 1)
+		data &= BACKEND_DISABLE_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+	return data & mask;
+}
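+
+/*
+ * The mirror image of si_get_cu_enabled() above: this one returns the
+ * *disabled* render backends for the selected SE/SH, i.e. the harvest
+ * fuses OR'd with the user disable bits, masked down to the
+ * max_rb_num / se_num / sh_per_se backends that exist per shader array.
+ */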
+
+static void si_setup_rb(struct radeon_device *rdev,
+			u32 se_num, u32 sh_per_se,
+			u32 max_rb_num)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	for (i = 0; i < se_num; i++) {
+		si_select_se_sh(rdev, i, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
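+
+/*
+ * PA_SC_RASTER_CONFIG consumes a two-bit map selector per RB pair; the
+ * switch above picks a selector based on which backend of each pair
+ * survived harvesting (enabled_rbs is walked two bits at a time), so the
+ * rasterizer only routes work at RBs that actually exist.
+ */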
+
+static void si_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 12;
+		rdev->config.si.max_cu_per_sh = 8;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 12;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PITCAIRN:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 8;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 8;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_VERDE:
+	default:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 2;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 4;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
+	rdev->config.si.mem_max_burst_length_bytes = 256;
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.si.mem_row_size_in_kb > 4)
+		rdev->config.si.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.si.shader_engine_tile_size = 32;
+	rdev->config.si.num_gpus = 1;
+	rdev->config.si.multi_gpu_tile_size = 64;
+
+	/* fix up row size */
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+
+	/* set up the tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.si.tile_config = 0;
+	switch (rdev->config.si.num_tile_pipes) {
+	case 1:
+		rdev->config.si.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.si.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.si.tile_config |= (2 << 0);
+		break;
+	case 8:
+	default:
+		/* XXX what about 12? */
+		rdev->config.si.tile_config |= (3 << 0);
+		break;
+	}
+	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+	case 0: /* four banks */
+		rdev->config.si.tile_config |= 0 << 4;
+		break;
+	case 1: /* eight banks */
+		rdev->config.si.tile_config |= 1 << 4;
+		break;
+	case 2: /* sixteen banks */
+	default:
+		rdev->config.si.tile_config |= 2 << 4;
+		break;
+	}
+	rdev->config.si.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.si.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
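+	/*
+	 * Worked example: 8 pipes encode as 0x3, 16 banks as 0x2 << 4, a
+	 * 256B pipe interleave as 0 << 8 and a 2KB row as 1 << 12, giving
+	 * tile_config = 0x1023 for userspace to decode.
+	 */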
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+
+	si_tiling_mode_table_init(rdev);
+
+	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+		    rdev->config.si.max_sh_per_se,
+		    rdev->config.si.max_backends_per_se);
+
+	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+		     rdev->config.si.max_sh_per_se,
+		     rdev->config.si.max_cu_per_sh);
+
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_CONFIG, 0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+/*
+ * GPU scratch register helper functions.
+ */
+static void si_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
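+
+/*
+ * In rough terms: the SURFACE_SYNC above drains the relevant caches, and
+ * EVENT_WRITE_EOP then asks the CP to write the 32-bit sequence number
+ * (DATA_SEL(1)) to the fence address and raise an interrupt (INT_SEL(2))
+ * once everything ahead of it has retired, which is what the host-side
+ * fence code polls for.
+ */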
+
+/*
+ * Indirect buffers (IBs).
+ */
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 header;
+
+	if (ib->is_const_ib) {
+		/* set switch buffer packet before const IB */
+		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		radeon_ring_write(ring, 0);
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	} else {
+		u32 next_rptr;
+		if (ring->rptr_save_reg) {
+			next_rptr = ring->wptr + 3 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+			radeon_ring_write(ring, ((ring->rptr_save_reg -
+						  PACKET3_SET_CONFIG_REG_START) >> 2));
+			radeon_ring_write(ring, next_rptr);
+		} else if (rdev->wb.enabled) {
+			next_rptr = ring->wptr + 5 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+			radeon_ring_write(ring, (1 << 8));
+			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+			radeon_ring_write(ring, next_rptr);
+		}
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+	}
+
+	radeon_ring_write(ring, header);
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
+
+	if (!ib->is_const_ib) {
+		/* flush read cache over gart for this vmid */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+				  PACKET3_TC_ACTION_ENA |
+				  PACKET3_SH_KCACHE_ACTION_ENA |
+				  PACKET3_SH_ICACHE_ACTION_ENA);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+	}
+}
+
+/*
+ * CP.
+ */
+static void si_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+	udelay(50);
+}
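+
+/*
+ * Note the asymmetry when halting: active VRAM is shrunk back to the
+ * CPU-visible aperture because, with the CP stopped, the driver can no
+ * longer use the GPU to migrate buffers out of the invisible region.
+ */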
+
+static int si_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+		return -EINVAL;
+
+	si_cp_enable(rdev, false);
+
+	/* PFP */
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	/* CE */
+	fw_data = (const __be32 *)rdev->ce_fw->data;
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_CE_UCODE_ADDR, 0);
+
+	/* ME */
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
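+
+/*
+ * The microcode images are stored big-endian, hence the be32_to_cpup()
+ * conversion on every word; the UCODE_ADDR/RAM_WADDR registers
+ * auto-increment on each data write, and rewriting them to 0 afterwards
+ * resets the write pointers for any later upload.
+ */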
+
+static int si_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	r = radeon_ring_lock(rdev, ring, 7 + 4);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	/* init the CP */
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+
+	/* init the CE partitions */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	radeon_ring_write(ring, 0xc000);
+	radeon_ring_write(ring, 0xe000);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	si_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < si_default_size; i++)
+		radeon_ring_write(ring, si_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+		ring = &rdev->ring[i];
+		r = radeon_ring_lock(rdev, ring, 2);
+		if (r) {
+			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+			return r;
+		}
+
+		/* clear the compute context state */
+		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+		radeon_ring_write(ring, 0);
+
+		radeon_ring_unlock_commit(rdev, ring);
+	}
+
+	return 0;
+}
+
+static void si_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	si_cp_enable(rdev, false);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int si_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, 0);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB0_RPTR);
+
+	/* ring1  - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB1_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB1_CNTL, tmp);
+
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB1_RPTR);
+
+	/* ring2 - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB2_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB2_CNTL, tmp);
+
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB2_RPTR);
+
+	/* start the rings */
+	si_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+
+	return 0;
+}
+
+bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 srbm_status;
+	u32 grbm_status, grbm_status2;
+	u32 grbm_status_se0, grbm_status_se1;
+
+	srbm_status = RREG32(SRBM_STATUS);
+	grbm_status = RREG32(GRBM_STATUS);
+	grbm_status2 = RREG32(GRBM_STATUS2);
+	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+	if (!(grbm_status & GUI_ACTIVE)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
+{
+	u32 grbm_reset = 0;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return;
+
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
+		RREG32(GRBM_STATUS2));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	/* reset all the gfx blocks */
+	grbm_reset = (SOFT_RESET_CP |
+		      SOFT_RESET_CB |
+		      SOFT_RESET_DB |
+		      SOFT_RESET_GDS |
+		      SOFT_RESET_PA |
+		      SOFT_RESET_SC |
+		      SOFT_RESET_BCI |
+		      SOFT_RESET_SPI |
+		      SOFT_RESET_SX |
+		      SOFT_RESET_TC |
+		      SOFT_RESET_TA |
+		      SOFT_RESET_VGT |
+		      SOFT_RESET_IA);
+
+	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+	WREG32(GRBM_SOFT_RESET, grbm_reset);
+	(void)RREG32(GRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(GRBM_SOFT_RESET, 0);
+	(void)RREG32(GRBM_SOFT_RESET);
+
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
+		RREG32(GRBM_STATUS2));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* dma0 */
+	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+	/* dma1 */
+	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		reset_mask &= ~RADEON_RESET_DMA;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		si_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		si_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	return 0;
+}
+
+int si_asic_reset(struct radeon_device *rdev)
+{
+	return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					RADEON_RESET_COMPUTE |
+					RADEON_RESET_DMA));
+}
+
+/* MC */
+static void si_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lock out access through the VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       rdev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       rdev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/* SI MC address space is 40 bits */
+static void si_vram_location(struct radeon_device *rdev,
+			     struct radeon_mc *mc, u64 base)
+{
+	mc->vram_start = base;
+	if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n",
+			(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
+			(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
+}
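+
+/*
+ * Example: a 2GB board placed at base 0 gets vram_start = 0x0 and
+ * vram_end = 0x7fffffff; the clamp to the aperture size only triggers when
+ * the requested size would run past the top of the 40-bit MC address space.
+ */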
+
+static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_af, size_bf;
+
+	size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
+		}
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+	} else {
+		if (mc->gtt_size > size_af) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n",
+			(uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end);
+}
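+
+/*
+ * In other words, the GTT lands in whichever hole around VRAM is larger,
+ * either below vram_start (size_bf) or between vram_end and the 40-bit
+ * limit (size_af), and is shrunk with a warning if neither hole can hold
+ * it at the requested size.
+ */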
+
+static void si_vram_gtt_location(struct radeon_device *rdev,
+				 struct radeon_mc *mc)
+{
+	if (mc->mc_vram_size > 0xFFC0000000ULL) {
+		/* leave room for at least 1024M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xFFC0000000ULL;
+		mc->mc_vram_size = 0xFFC0000000ULL;
+	}
+	si_vram_location(rdev, mc, 0);
+	mc->gtt_base_align = 0;
+	si_gtt_location(rdev, mc);
+}
+
+static int si_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	/* size in MB on si */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	si_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/*
+ * GART
+ */
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
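+
+/*
+ * VM_INVALIDATE_REQUEST takes a per-context bitmask, so writing 1 here only
+ * invalidates context 0 (the GART); si_vm_flush() below writes 1 << vm->id
+ * to target an individual paging VM instead.
+ */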
+
+static int si_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and set up and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			       rdev->gart.table_addr >> 12);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
+			       rdev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	si_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void si_pcie_gart_disable(struct radeon_device *rdev)
+{
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void si_pcie_gart_fini(struct radeon_device *rdev)
+{
+	si_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+/* vm parser */
+static bool si_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_ESGS_RING_SIZE:
+	case VGT_GSVS_RING_SIZE:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_TF_RING_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case VGT_TF_MEMORY_BASE:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQC_CACHES:
+	case SPI_STATIC_THREAD_MGMT_1:
+	case SPI_STATIC_THREAD_MGMT_2:
+	case SPI_STATIC_THREAD_MGMT_3:
+	case SPI_PS_MAX_WAVE_ID:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
+static int si_vm_packet3_ce_check(struct radeon_device *rdev,
+				  u32 *ib, struct radeon_cs_packet *pkt)
+{
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_SET_CE_DE_COUNTERS:
+	case PACKET3_LOAD_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM_OFFSET:
+	case PACKET3_DUMP_CONST_RAM:
+	case PACKET3_INCREMENT_CE_COUNTER:
+	case PACKET3_WAIT_ON_DE_COUNTER:
+	case PACKET3_CE_WRITE:
+		break;
+	default:
+		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
+				   u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDIRECT_MULTI:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!si_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!si_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int si_vm_packet3_compute_check(struct radeon_device *rdev,
+				       u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case PACKET_TYPE2:
+			idx += 1;
+			break;
+		case PACKET_TYPE3:
+			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			if (ib->is_const_ib)
+				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
+			else {
+				switch (ib->ring) {
+				case RADEON_RING_TYPE_GFX_INDEX:
+					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
+					break;
+				case CAYMAN_RING_TYPE_CP1_INDEX:
+				case CAYMAN_RING_TYPE_CP2_INDEX:
+					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
+					break;
+				default:
+					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
+					ret = -EINVAL;
+					break;
+				}
+			}
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
+
+/*
+ * vm
+ */
+int si_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 16;
+	/* base offset of vram pages */
+	rdev->vm_manager.vram_base_offset = 0;
+
+	return 0;
+}
+
+void si_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags)
+{
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
+
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 2 + count * 2;
+			if (ndw > 0x3FFE)
+				ndw = 0x3FFE;
+
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+			radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+						 WRITE_DATA_DST_SEL(1)));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe));
+			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
+			}
+		}
+	} else {
+		/* DMA */
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				/* for non-physically contiguous pages (system) */
+				radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+				radeon_ring_write(ring, pe);
+				radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+					/* only system pages reach this branch */
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+					addr += incr;
+					value |= r600_flags;
+					radeon_ring_write(ring, value);
+					radeon_ring_write(ring, upper_32_bits(value));
+				}
+			}
+		} else {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				if (flags & RADEON_VM_PAGE_VALID)
+					value = addr;
+				else
+					value = 0;
+				/* for physically contiguous pages (vram) */
+				radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+				radeon_ring_write(ring, pe); /* dst addr */
+				radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+				radeon_ring_write(ring, r600_flags); /* mask */
+				radeon_ring_write(ring, 0);
+				radeon_ring_write(ring, value); /* value */
+				radeon_ring_write(ring, upper_32_bits(value));
+				radeon_ring_write(ring, incr); /* increment size */
+				radeon_ring_write(ring, 0);
+				pe += ndw * 4;
+				addr += (ndw / 2) * incr;
+				count -= ndw / 2;
+			}
+		}
+	}
+}
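+
+/* Illustration only (not part of the driver): a hypothetical caller
+ * mapping `count` physically contiguous VRAM pages starting at GPU
+ * address `gpu_addr` into the page table at `pe` might look like
+ *
+ *	si_vm_set_page(rdev, pe, gpu_addr, count, RADEON_GPU_PAGE_SIZE,
+ *		       RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE);
+ *
+ * where the flag names are assumed from the radeon VM page flags.
+ * Each outer-loop iteration above emits one packet covering at most
+ * the packet's maximum dword count, continuing until `count` page
+ * entries have been written.
+ */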
+
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	/* write new base address */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+
+	if (vm->id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-15 are the VM contexts 0-15 */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm->id < 8) {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	} else {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+	}
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
+
+/*
+ * RLC
+ */
+void si_rlc_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	/* save restore block */
+	if (rdev->rlc.save_restore_obj) {
+		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.save_restore_obj);
+		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+
+		radeon_bo_unref(&rdev->rlc.save_restore_obj);
+		rdev->rlc.save_restore_obj = NULL;
+	}
+
+	/* clear state block */
+	if (rdev->rlc.clear_state_obj) {
+		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.clear_state_obj);
+		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+
+		radeon_bo_unref(&rdev->rlc.clear_state_obj);
+		rdev->rlc.clear_state_obj = NULL;
+	}
+}
+
+int si_rlc_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* save restore block */
+	if (rdev->rlc.save_restore_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.save_restore_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+	if (unlikely(r != 0)) {
+		si_rlc_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->rlc.save_restore_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+		si_rlc_fini(rdev);
+		return r;
+	}
+
+	/* clear state block */
+	if (rdev->rlc.clear_state_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.clear_state_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+			si_rlc_fini(rdev);
+			return r;
+		}
+	}
+	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+	if (unlikely(r != 0)) {
+		si_rlc_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->rlc.clear_state_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+		si_rlc_fini(rdev);
+		return r;
+	}
+
+	return 0;
+}
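+
+/* The sequence above follows the usual radeon buffer-object pattern
+ * (sketch only, not driver code): create the bo, reserve it, pin it
+ * to a domain to obtain a stable GPU address, then unreserve:
+ *
+ *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+ *			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ *	r = radeon_bo_reserve(bo, false);
+ *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ *	radeon_bo_unreserve(bo);
+ *
+ * with si_rlc_fini() above undoing it via unpin/unref.
+ */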
+
+static void si_rlc_stop(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, 0);
+}
+
+static void si_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int si_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	si_rlc_stop(rdev);
+
+	WREG32(RLC_RL_BASE, 0);
+	WREG32(RLC_RL_SIZE, 0);
+	WREG32(RLC_LB_CNTL, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+	WREG32(RLC_LB_CNTR_INIT, 0);
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+		WREG32(RLC_UCODE_ADDR, i);
+		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	si_rlc_start(rdev);
+
+	return 0;
+}
+
+static void si_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+static void si_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void si_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING1, 0);
+	WREG32(CP_INT_CNTL_RING2, 0);
+	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+static int si_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	si_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = si_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	si_disable_interrupt_state(rdev);
+
+	pci_enable_busmaster(rdev->dev);
+
+	/* enable irqs */
+	si_enable_interrupts(rdev);
+
+	return ret;
+}
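+
+/* Sizing note (illustration, assuming drm_order() returns the
+ * rounded-up log2): with the 64KB IH ring allocated in si_init(),
+ * ring_size / 4 is 16384 dwords, so rb_bufsz = drm_order(16384) = 14,
+ * the value programmed into the IH_RB_CNTL size field above.
+ */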
+
+int si_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 dma_cntl, dma_cntl1;
+
+	if (!rdev->irq.installed) {
+		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		si_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		si_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
+	/* enable CP interrupts on all rings */
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int gfx\n");
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp1\n");
+		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp2\n");
+		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma1\n");
+		dma_cntl1 |= TRAP_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("si_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("si_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    atomic_read(&rdev->irq.pflip[2])) {
+		DRM_DEBUG("si_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    atomic_read(&rdev->irq.pflip[3])) {
+		DRM_DEBUG("si_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    atomic_read(&rdev->irq.pflip[4])) {
+		DRM_DEBUG("si_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    atomic_read(&rdev->irq.pflip[5])) {
+		DRM_DEBUG("si_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("si_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("si_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("si_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("si_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("si_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("si_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+
+	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
+	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+	}
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	return 0;
+}
+
+static inline void si_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+static void si_irq_disable(struct radeon_device *rdev)
+{
+	si_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	si_irq_ack(rdev);
+	si_disable_interrupt_state(rdev);
+}
+
+static void si_irq_suspend(struct radeon_device *rdev)
+{
+	si_irq_disable(rdev);
+	si_rlc_stop(rdev);
+}
+
+static void si_irq_fini(struct radeon_device *rdev)
+{
+	si_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/*        SI IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ * [79:72]  - VMID
+ * [127:80] - reserved
+ */
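+
+/* A minimal decoding sketch (illustration only; it mirrors the field
+ * extraction done in si_irq_process() below). Given the 128-bit entry
+ * starting at dword index `ring_index`:
+ *
+ *	src_id   = le32_to_cpu(rdev->ih.ring[ring_index + 0]) & 0xff;
+ *	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+ *	ring_id  = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+ *	vm_id    = (le32_to_cpu(rdev->ih.ring[ring_index + 2]) >> 8) & 0xff;
+ */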
+irqreturn_t si_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data, ring_id;
+	u32 ring_index;
+	bool queue_hotplug = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	si_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[2]))
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[3]))
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[4]))
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						DRM_WAKEUP(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[5]))
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 146:
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
+		case 176: /* RINGID0 CP_INT */
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 177: /* RINGID1 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+			break;
+		case 178: /* RINGID2 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			switch (ring_id) {
+			case 0:
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+				break;
+			case 2:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+				break;
+			}
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA1 trap event */
+			DRM_DEBUG("IH: DMA1 trap\n");
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = si_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0xFFFFF)
+			cur_size_in_bytes = 0xFFFFF;
+		size_in_bytes -= cur_size_in_bytes;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
+		radeon_ring_write(ring, src_offset & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
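+
+/* Usage sketch (illustration only, not part of this commit): copying
+ * n GPU pages and waiting for completion might look like
+ *
+ *	struct radeon_fence *fence = NULL;
+ *	int r = si_copy_dma(rdev, src_gpu, dst_gpu, n, &fence);
+ *	if (!r)
+ *		r = radeon_fence_wait(fence, false);
+ *	radeon_fence_unref(&fence);
+ *
+ * radeon_fence_wait()/radeon_fence_unref() are assumed from the
+ * radeon fence API. Each DMA_PACKET_COPY emitted above moves at most
+ * 0xFFFFF bytes, hence the DIV_ROUND_UP(size_in_bytes, 0xfffff)
+ * loop count.
+ */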
+
+/*
+ * startup/shutdown callbacks
+ */
+static int si_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+	    !rdev->rlc_fw || !rdev->mc_fw) {
+		r = si_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = si_mc_load_microcode(rdev);
+	if (r) {
+		DRM_ERROR("Failed to load MC firmware!\n");
+		return r;
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	si_mc_program(rdev);
+	r = si_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	si_gpu_init(rdev);
+
+#if 0
+	r = evergreen_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+#endif
+	/* allocate rlc buffers */
+	r = si_rlc_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	r = si_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	si_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     CP_RB0_RPTR, CP_RB0_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+			     CP_RB1_RPTR, CP_RB1_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+			     CP_RB2_RPTR, CP_RB2_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = si_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = si_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int si_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on rv770 and later hw,
+	 * unlike r500 hw, posting performs the tasks necessary to bring
+	 * the GPU back into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		DRM_ERROR("si startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int si_suspend(struct radeon_device *rdev)
+{
+	radeon_vm_manager_fini(rdev);
+	si_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	si_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	si_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call asic-specific functions. This should also allow us to
+ * remove a bunch of callback functions like vram_info.
+ */
+int si_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	si_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+
+	/* initialize memory controller */
+	r = si_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		si_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		si_irq_fini(rdev);
+		si_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		si_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (!rdev->mc_fw) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void si_fini(struct radeon_device *rdev)
+{
+#if 0
+	r600_blit_fini(rdev);
+#endif
+	si_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	si_irq_fini(rdev);
+	si_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	si_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	si_fini_microcode(rdev);
+	free(rdev->bios, DRM_MEM_DRIVER);
+	rdev->bios = NULL;
+}
+
+/**
+ * si_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	sx_xlock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	sx_xunlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
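+
+/* Usage sketch (illustration only): the snapshot can be used to
+ * measure elapsed GPU clocks around a region of interest, assuming
+ * the counter ticks at the GPU core clock:
+ *
+ *	uint64_t t0 = si_get_gpu_clock(rdev);
+ *	... work to be timed ...
+ *	uint64_t ticks = si_get_gpu_clock(rdev) - t0;
+ */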


Property changes on: trunk/sys/dev/drm2/radeon/si.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/si_blit_shaders.c
===================================================================
--- trunk/sys/dev/drm2/radeon/si_blit_shaders.c	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/si_blit_shaders.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,255 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher at amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/si_blit_shaders.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+
+const u32 si_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0046900,
+	0x00000008,
+	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0046900,
+	0x00000100,
+	0xffffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+	0xc0046900,
+	0x00000105,
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /* VGT_OUT_DEALLOC_CNTL */
+};
+
+const u32 si_default_size = ARRAY_SIZE(si_default_state);
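+
+/* The table above is a PM4 type-3 command stream. As a sketch of the
+ * (assumed) header encoding, the first dword 0xc0066900 decodes as
+ *
+ *	type   = (0xc0066900 >> 30) & 0x3;	-> 3 (PACKET3)
+ *	count  = (0xc0066900 >> 16) & 0x3fff;	-> 6 (i.e. 7 payload dwords)
+ *	opcode = (0xc0066900 >> 8) & 0xff;	-> 0x69 (SET_CONTEXT_REG)
+ *
+ * so the packet carries one register offset (0x00000000, relative to
+ * the context-register base) followed by six register values, which
+ * matches the DB_RENDER_CONTROL..DB_HTILE_DATA_BASE block above.
+ */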


Property changes on: trunk/sys/dev/drm2/radeon/si_blit_shaders.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/si_blit_shaders.h
===================================================================
--- trunk/sys/dev/drm2/radeon/si_blit_shaders.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/si_blit_shaders.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,36 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SI_BLIT_SHADERS_H
+#define SI_BLIT_SHADERS_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/si_blit_shaders.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+extern const u32 si_default_state[];
+
+extern const u32 si_default_size;
+
+#endif


Property changes on: trunk/sys/dev/drm2/radeon/si_blit_shaders.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/si_reg.h
===================================================================
--- trunk/sys/dev/drm2/radeon/si_reg.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/si_reg.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,109 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/si_reg.h 254885 2013-08-25 19:37:15Z dumbbell $");
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x65b0
+#define SI_DC_GPIO_HPD_A                         0x65b4
+#define SI_DC_GPIO_HPD_EN                        0x65b8
+#define SI_DC_GPIO_HPD_Y                         0x65bc
+
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
+
+#endif
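
The SI_GRPH_CONTROL helpers above each mask their argument and shift it
into the field's bit position, so a full register value is composed by
OR-ing field macros together.  A minimal sketch, assuming a hypothetical
wreg32() MMIO write helper that is not part of this commit, for a 32bpp
ARGB8888 scanout surface in linear-aligned array mode:

	/* Compose SI_GRPH_CONTROL purely from the field macros defined
	 * above; wreg32() is a hypothetical stand-in for the driver's
	 * real register accessor. */
	uint32_t grph = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
	    SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
	    SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_LINEAR_ALIGNED);
	wreg32(SI_GRPH_CONTROL, grph);

Note that the SI_GRPH_FORMAT values are interpreted relative to the
selected depth, as the 8/16/32 BPP comment groups above indicate:
ARGB8888 is value 0 only because 32BPP is selected.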


Property changes on: trunk/sys/dev/drm2/radeon/si_reg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/radeon/sid.h
===================================================================
--- trunk/sys/dev/drm2/radeon/sid.h	                        (rev 0)
+++ trunk/sys/dev/drm2/radeon/sid.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1068 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/radeon/sid.h 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+
+#define	CG_MULT_THERMAL_STATUS					0x714
+#define		ASIC_MAX_TEMP(x)				((x) << 0)
+#define		ASIC_MAX_TEMP_MASK				0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT				0
+#define		CTF_TEMP(x)					((x) << 9)
+#define		CTF_TEMP_MASK					0x0003fe00
+#define		CTF_TEMP_SHIFT					9
+
+#define SI_MAX_SH_GPRS           256
+#define SI_MAX_TEMP_GPRS         16
+#define SI_MAX_SH_THREADS        256
+#define SI_MAX_SH_STACK_ENTRIES  4096
+#define SI_MAX_FRC_EOV_CNT       16384
+#define SI_MAX_BACKENDS          8
+#define SI_MAX_BACKENDS_MASK     0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS             12
+#define SI_MAX_SIMDS_MASK        0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES             8
+#define SI_MAX_PIPES_MASK        0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM           0xFFFF
+#define SI_MAX_TCC               16
+#define SI_MAX_TCC_MASK          0xFFFF
+
+#define VGA_HDP_CONTROL  				0x328
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	SRBM_STATUS				        0xE50
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0xe80
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0xe84
+
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x1438
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x143c
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x1440
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x1444
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x1448
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x144c
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153c
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x1540
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x1544
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x1548
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x154c
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x1550
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x1554
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x1558
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155c
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x1560
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x1580
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x2808
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1U << 31)
+
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1U << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define	DC_LB_MEMORY_SPLIT					0x6b0c
+#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
+
+#define	PRIORITY_A_CNT						0x6b18
+#define		PRIORITY_MARK_MASK				0x7fff
+#define		PRIORITY_OFF					(1 << 16)
+#define		PRIORITY_ALWAYS_ON				(1 << 20)
+#define	PRIORITY_B_CNT						0x6b1c
+
+#define	DPG_PIPE_ARBITRATION_CONTROL3				0x6cc8
+#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
+#define	DPG_PIPE_LATENCY_CONTROL				0x6ccc
+#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x8008
+#define		RLC_RQ_PENDING 					(1 << 0)
+#define		RLC_BUSY 					(1 << 8)
+#define		TC_BUSY 					(1 << 9)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1U << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1U << 31)
+
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_RLC					(1 << 2)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_BCI					(1 << 7)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1U << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+
+#define CP_ME_CNTL					0x86D8
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_COHER_CNTL2					0x85E8
+
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_VTX_VECT_EJECT_REG				0x88B0
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ESGS_RING_SIZE				0x88C8
+#define	VGT_GSVS_RING_SIZE				0x88CC
+
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define	VGT_PRIMITIVE_TYPE				0x8958
+#define	VGT_INDEX_TYPE					0x895C
+
+#define	VGT_NUM_INDICES					0x8970
+#define	VGT_NUM_INSTANCES				0x8974
+
+#define	VGT_TF_RING_SIZE				0x8988
+
+#define	VGT_HS_OFFCHIP_PARAM				0x89B0
+
+#define	VGT_TF_MEMORY_BASE				0x89B8
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x8BF0
+
+#define	SQ_CONFIG					0x8C00
+
+#define	SQC_CACHES					0x8C08
+
+#define	SX_DEBUG_1					0x9060
+
+#define	SPI_STATIC_THREAD_MGMT_1			0x90E0
+#define	SPI_STATIC_THREAD_MGMT_2			0x90E4
+#define	SPI_STATIC_THREAD_MGMT_3			0x90E8
+#define	SPI_PS_MAX_WAVE_ID				0x90EC
+
+#define	SPI_CONFIG_CNTL					0x9100
+
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+
+#define	TA_CNTL_AUX					0x9508
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
+
+#define	GB_TILE_MODE0					0x9910
+#       define MICRO_TILE_MODE(x)				((x) << 0)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define BANK_WIDTH(x)					((x) << 14)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 16)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 18)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 20)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
+
+#define	CB_PERFCOUNTER0_SELECT0				0x9a20
+#define	CB_PERFCOUNTER0_SELECT1				0x9a24
+#define	CB_PERFCOUNTER1_SELECT0				0x9a28
+#define	CB_PERFCOUNTER1_SELECT1				0x9a2c
+#define	CB_PERFCOUNTER2_SELECT0				0x9a30
+#define	CB_PERFCOUNTER2_SELECT1				0x9a34
+#define	CB_PERFCOUNTER3_SELECT0				0x9a38
+#define	CB_PERFCOUNTER3_SELECT1				0x9a3c
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0xac0c
+#define	TCP_CHAN_STEER_HI				0xac10
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1U << 31)
+
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+
+#define	CP_CE_UCODE_ADDR				0xC168
+#define	CP_CE_UCODE_DATA				0xC16C
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define CP_INT_CNTL_RING0                               0xC1A8
+#define CP_INT_CNTL_RING1                               0xC1AC
+#define CP_INT_CNTL_RING2                               0xC1B0
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1U << 31)
+#define CP_INT_STATUS_RING0                             0xC1B4
+#define CP_INT_STATUS_RING1                             0xC1B8
+#define CP_INT_STATUS_RING2                             0xC1BC
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1U << 31)
+
+#define	CP_DEBUG					0xC1FC
+
+#define RLC_CNTL                                          0xC300
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0xC304
+#define RLC_RL_SIZE                                       0xC308
+#define RLC_LB_CNTL                                       0xC30C
+#define RLC_SAVE_AND_RESTORE_BASE                         0xC310
+#define RLC_LB_CNTR_MAX                                   0xC314
+#define RLC_LB_CNTR_INIT                                  0xC318
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0xC320
+
+#define RLC_UCODE_ADDR                                    0xC32C
+#define RLC_UCODE_DATA                                    0xC330
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC340
+#define RLC_MC_CNTL                                       0xC344
+#define RLC_UCODE_CNTL                                    0xC348
+
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			GDS_PARTITION_BASE		2
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ALLOC_GDS				0x1B
+#define	PACKET3_WRITE_GDS_RAM				0x1C
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC					0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1U << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+		 */
+#define		INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_LOAD_CONFIG_REG				0x5F
+#define	PACKET3_LOAD_CONTEXT_REG			0x60
+#define	PACKET3_LOAD_SH_REG				0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000b000
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x0000b000
+#define		PACKET3_SET_SH_REG_END				0x0000c000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_ME_WRITE				0x7A
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_CE_WRITE				0x7F
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SET_CE_DE_COUNTERS			0x89
+#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG  				  0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
+#endif
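
The PM4 macros near the end of this header encode command-processor
packets: PACKET3(op, n) builds a type-3 header whose count field is one
less than the number of payload dwords, and the CP_PACKET_GET_* macros
decode a header again.  A minimal sketch of a WRITE_DATA packet writing
one dword to SCRATCH_REG0, assuming (per the destination-select comment
above, with the register case taken to use its dword offset) a register
destination; pkt[] stands in for a real ring buffer:

	/* Five dwords total: header plus four payload dwords, so the
	 * header carries a count of 3. */
	uint32_t pkt[5];
	pkt[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	pkt[1] = WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0);
	pkt[2] = SCRATCH_REG0 >> 2;	/* register dword offset (assumed) */
	pkt[3] = 0;			/* address high dword, unused here */
	pkt[4] = 0xcafef00d;		/* value to write */

	/* The decode macros invert the header: CP_PACKET_GET_TYPE(pkt[0])
	 * yields PACKET_TYPE3 and CP_PACKET_GET_COUNT(pkt[0]) yields 3. */

The async-DMA rings use the analogous DMA_PACKET() and DMA_IB_PACKET()
headers defined just above, with the command nibble in bits 31:28 and
the dword count in the low 20 bits.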


Property changes on: trunk/sys/dev/drm2/radeon/sid.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_agp_backend.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_agp_backend.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_agp_backend.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,137 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *          Keith Packard.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_agp_backend.c 275408 2014-12-02 14:09:54Z tijl $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <dev/drm2/ttm/ttm_placement.h>
+
+struct ttm_agp_backend {
+	struct ttm_tt ttm;
+	vm_offset_t offset;
+	vm_page_t *pages;
+	device_t bridge;
+};
+
+MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");
+
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+	struct drm_mm_node *node = bo_mem->mm_node;
+	int ret;
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		vm_page_t page = ttm->pages[i];
+
+		if (!page)
+			page = ttm->dummy_read_page;
+
+		agp_be->pages[i] = page;
+	}
+
+	agp_be->offset = node->start * PAGE_SIZE;
+	ret = -agp_bind_pages(agp_be->bridge, agp_be->pages,
+			      ttm->num_pages << PAGE_SHIFT, agp_be->offset);
+	if (ret)
+		printf("[TTM] AGP Bind memory failed\n");
+
+	return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_tt *ttm)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+	return -agp_unbind_pages(agp_be->bridge, ttm->num_pages << PAGE_SHIFT,
+				 agp_be->offset);
+}
+
+static void ttm_agp_destroy(struct ttm_tt *ttm)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+	ttm_tt_fini(ttm);
+	free(agp_be->pages, M_TTM_AGP);
+	free(agp_be, M_TTM_AGP);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+	.bind = ttm_agp_bind,
+	.unbind = ttm_agp_unbind,
+	.destroy = ttm_agp_destroy,
+};
+
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+				 device_t bridge,
+				 unsigned long size, uint32_t page_flags,
+				 vm_page_t dummy_read_page)
+{
+	struct ttm_agp_backend *agp_be;
+
+	agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);
+
+	agp_be->bridge = bridge;
+	agp_be->ttm.func = &ttm_agp_func;
+
+	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		free(agp_be, M_TTM_AGP);
+		return NULL;
+	}
+
+	agp_be->offset = 0;
+	agp_be->pages = malloc(agp_be->ttm.num_pages * sizeof(*agp_be->pages),
+			       M_TTM_AGP, M_WAITOK);
+
+	return &agp_be->ttm;
+}
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	return ttm_pool_populate(ttm);
+}
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+#endif
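
The backend above embeds its struct ttm_tt as the first member, so the
bind, unbind and destroy hooks can recover the containing
ttm_agp_backend with container_of(), and ttm_agp_tt_create() wires in
the function table before letting ttm_tt_init() do the generic setup.
A driver running on an AGP GART would plug this in from its
ttm_tt_create hook, sketched here with a hypothetical mydrv_device
softc and mydrv_from_bdev() accessor (the ttm_agp_tt_create() signature
is the one defined above):

	static struct ttm_tt *
	mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
	    uint32_t page_flags, vm_page_t dummy_read_page)
	{
		/* mydrv_from_bdev() is a hypothetical helper mapping the
		 * TTM device back to the driver softc that holds the AGP
		 * bridge device_t. */
		struct mydrv_device *mdev = mydrv_from_bdev(bdev);

		return (ttm_agp_tt_create(bdev, mdev->agp_bridge, size,
		    page_flags, dummy_read_page));
	}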


Property changes on: trunk/sys/dev/drm2/ttm/ttm_agp_backend.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_bo.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1897 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo.c 285002 2015-07-01 11:28:42Z avg $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <vm/vm_pageout.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
+
+MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+	printf("    has_type: %d\n", man->has_type);
+	printf("    use_type: %d\n", man->use_type);
+	printf("    flags: 0x%08X\n", man->flags);
+	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	printf("    size: %ju\n", (uintmax_t)man->size);
+	printf("    available_caching: 0x%08X\n", man->available_caching);
+	printf("    default_caching: 0x%08X\n", man->default_caching);
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement)
+{
+	int i, ret, mem_type;
+
+	printf("No space for %p (%lu pages, %luK, %luM)\n",
+	       bo, bo->mem.num_pages, bo->mem.size >> 10,
+	       bo->mem.size >> 20);
+	for (i = 0; i < placement->num_placement; i++) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return;
+		printf("  placement[%d]=0x%08X (%d)\n",
+		       i, placement->placement[i], mem_type);
+		ttm_mem_type_debug(bo->bdev, mem_type);
+	}
+}
+
+#if 0
+static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
+    char *buffer)
+{
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+#endif
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+	return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	size_t acc_size = bo->acc_size;
+
+	MPASS(atomic_read(&bo->list_kref) == 0);
+	MPASS(atomic_read(&bo->kref) == 0);
+	MPASS(atomic_read(&bo->cpu_writers) == 0);
+	MPASS(bo->sync_obj == NULL);
+	MPASS(bo->mem.mm_node == NULL);
+	MPASS(list_empty(&bo->lru));
+	MPASS(list_empty(&bo->ddestroy));
+
+	if (bo->ttm)
+		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
+	if (bo->destroy)
+		bo->destroy(bo);
+	else {
+		free(bo, M_TTM_BO);
+	}
+	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+}
+
+static int
+ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
+{
+	const char *wmsg;
+	int flags, ret;
+
+	ret = 0;
+	if (interruptible) {
+		flags = PCATCH;
+		wmsg = "ttbowi";
+	} else {
+		flags = 0;
+		wmsg = "ttbowu";
+	}
+	while (ttm_bo_is_reserved(bo)) {
+		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
+		if (ret == -EINTR || ret == -ERESTART)
+			ret = -ERESTARTSYS;
+		if (ret != 0)
+			break;
+	}
+	return (ret);
+}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
+
+	MPASS(ttm_bo_is_reserved(bo));
+
+	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+		MPASS(list_empty(&bo->lru));
+
+		man = &bdev->man[bo->mem.mem_type];
+		list_add_tail(&bo->lru, &man->lru);
+		refcount_acquire(&bo->list_kref);
+
+		if (bo->ttm != NULL) {
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
+			refcount_acquire(&bo->list_kref);
+		}
+	}
+}
+
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+	int put_count = 0;
+
+	if (!list_empty(&bo->swap)) {
+		list_del_init(&bo->swap);
+		++put_count;
+	}
+	if (!list_empty(&bo->lru)) {
+		list_del_init(&bo->lru);
+		++put_count;
+	}
+
+	/*
+	 * TODO: Add a driver hook to delete from
+	 * driver-specific LRU's here.
+	 */
+
+	return put_count;
+}
+
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+			  bool interruptible,
+			  bool no_wait, bool use_sequence, uint32_t sequence)
+{
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		/**
+		 * Deadlock avoidance for multi-bo reserving.
+		 */
+		if (use_sequence && bo->seq_valid) {
+			/**
+			 * We've already reserved this one.
+			 */
+			if (unlikely(sequence == bo->val_seq))
+				return -EDEADLK;
+			/**
+			 * Already reserved by a thread that will not back
+			 * off for us. We need to back off.
+			 */
+			if (unlikely(sequence - bo->val_seq < (1U << 31)))
+				return -EAGAIN;
+		}
+
+		if (no_wait)
+			return -EBUSY;
+
+		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (use_sequence) {
+		bool wake_up = false;
+		/**
+		 * Wake up waiters that may need to recheck for deadlock,
+		 * if we decreased the sequence number.
+		 */
+		if (unlikely((bo->val_seq - sequence < (1U << 31))
+			     || !bo->seq_valid))
+			wake_up = true;
+
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order.  However, since we wake up all
+		 * waiters in that case, this should not pose a problem; at
+		 * worst someone accidentally hits -EAGAIN in ttm_bo_reserve
+		 * when they see an old value of val_seq.  That can only
+		 * happen if seq_valid was written before val_seq, and it
+		 * just means slightly increased cpu usage.
+		 */
+		bo->val_seq = sequence;
+		bo->seq_valid = true;
+		if (wake_up)
+			wakeup(bo);
+	} else {
+		bo->seq_valid = false;
+	}
+
+	return 0;
+}
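+
+/*
+ * A minimal sketch (not driver code) of how the ticketed reservation above
+ * is meant to be used when reserving more than one bo: all reservations in
+ * one transaction share a sequence number, and a loser backs off through
+ * ttm_bo_reserve_slowpath.  The function name, the way the ticket is taken
+ * from bdev->val_seq and the restart policy are illustrative assumptions.
+ */
+#if 0
+static int
+example_reserve_pair(struct ttm_bo_device *bdev,
+    struct ttm_buffer_object *a, struct ttm_buffer_object *b)
+{
+	uint32_t seq;
+	int ret;
+
+	seq = atomic_fetchadd_32(&bdev->val_seq, 1);
+	ret = ttm_bo_reserve(a, true, false, true, seq);
+	if (ret != 0)
+		return (ret);
+	ret = ttm_bo_reserve(b, true, false, true, seq);
+	if (ret == -EAGAIN) {
+		/*
+		 * A competing ticket owns b and will not back off for us.
+		 * Drop what we hold, wait for b on the slow path, and let
+		 * the caller restart the whole transaction.
+		 */
+		ttm_bo_unreserve(a);
+		ret = ttm_bo_reserve_slowpath(b, true, seq);
+		if (ret == 0)
+			ttm_bo_unreserve(b);
+		return (ret == 0 ? -EAGAIN : ret);
+	}
+	if (ret != 0)
+		ttm_bo_unreserve(a);
+	return (ret);
+}
+#endif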
+
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	u_int old;
+
+	old = atomic_fetchadd_int(&bo->list_kref, -count);
+	if (old <= count) {
+		if (never_free)
+			panic("ttm_bo_ref_buf");
+		ttm_bo_release_list(bo);
+	}
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+		   bool interruptible,
+		   bool no_wait, bool use_sequence, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count = 0;
+	int ret;
+
+	mtx_lock(&bo->glob->lru_lock);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		put_count = ttm_bo_del_from_lru(bo);
+		mtx_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	} else
+		mtx_unlock(&bo->glob->lru_lock);
+
+	return ret;
+}
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		if (bo->seq_valid && sequence == bo->val_seq) {
+			DRM_ERROR(
+			    "%s: bo->seq_valid && sequence == bo->val_seq",
+			    __func__);
+		}
+
+		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wakeup(bo);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	mtx_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		put_count = ttm_bo_del_from_lru(bo);
+		mtx_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	} else
+		mtx_unlock(&glob->lru_lock);
+	return ret;
+}
+
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wakeup(bo);
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	mtx_lock(&glob->lru_lock);
+	ttm_bo_unreserve_locked(bo);
+	mtx_unlock(&glob->lru_lock);
+}
+
+/*
+ * Call with bo->mutex locked.
+ */
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	int ret = 0;
+	uint32_t page_flags = 0;
+
+	TTM_ASSERT_LOCKED(&bo->mutex);
+	bo->ttm = NULL;
+
+	if (bdev->need_dma32)
+		page_flags |= TTM_PAGE_FLAG_DMA32;
+
+	switch (bo->type) {
+	case ttm_bo_type_device:
+		if (zero_alloc)
+			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
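+		/* FALLTHROUGH */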
+	case ttm_bo_type_kernel:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags, glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL))
+			ret = -ENOMEM;
+		break;
+	case ttm_bo_type_sg:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags | TTM_PAGE_FLAG_SG,
+						      glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL)) {
+			ret = -ENOMEM;
+			break;
+		}
+		bo->ttm->sg = bo->sg;
+		break;
+	default:
+		printf("[TTM] Illegal buffer object type\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem,
+				  bool evict, bool interruptible,
+				  bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (old_is_pci || new_is_pci ||
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
+
+	/*
+	 * Create and bind a ttm if required.
+	 */
+
+	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+		if (bo->ttm == NULL) {
+			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+			ret = ttm_bo_add_ttm(bo, zero);
+			if (ret)
+				goto out_err;
+		}
+
+		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+		if (ret)
+			goto out_err;
+
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			ret = ttm_tt_bind(bo->ttm, mem);
+			if (ret)
+				goto out_err;
+		}
+
+		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+			if (bdev->driver->move_notify)
+				bdev->driver->move_notify(bo, mem);
+			bo->mem = *mem;
+			mem->mm_node = NULL;
+			goto moved;
+		}
+	}
+
+	if (bdev->driver->move_notify)
+		bdev->driver->move_notify(bo, mem);
+
+	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+	else if (bdev->driver->move)
+		ret = bdev->driver->move(bo, evict, interruptible,
+					 no_wait_gpu, mem);
+	else
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+
+	if (ret) {
+		if (bdev->driver->move_notify) {
+			struct ttm_mem_reg tmp_mem = *mem;
+			*mem = bo->mem;
+			bo->mem = tmp_mem;
+			bdev->driver->move_notify(bo, mem);
+			bo->mem = *mem;
+			*mem = tmp_mem;
+		}
+
+		goto out_err;
+	}
+
+moved:
+	if (bo->evicted) {
+		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+		if (ret)
+			printf("[TTM] Can not flush read caches\n");
+		bo->evicted = false;
+	}
+
+	if (bo->mem.mm_node) {
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
+		    bdev->man[bo->mem.mem_type].gpu_offset;
+		bo->cur_placement = bo->mem.placement;
+	} else
+		bo->offset = 0;
+
+	return 0;
+
+out_err:
+	new_man = &bdev->man[bo->mem.mem_type];
+	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * Call with bo::reserved held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	if (bo->bdev->driver->move_notify)
+		bo->bdev->driver->move_notify(bo, NULL);
+
+	if (bo->ttm) {
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+	}
+	ttm_bo_mem_put(bo, &bo->mem);
+
+	atomic_set(&bo->reserved, 0);
+	wakeup(bo);
+
+	/*
+	 * Since the final reference to this bo may not be dropped by
+	 * the current task we have to put a memory barrier here to make
+	 * sure the changes done in this function are always visible.
+	 *
+	 * This function only needs protection against the final kref_put.
+	 */
+	mb();
+}
+
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_driver *driver = bdev->driver;
+	void *sync_obj = NULL;
+	int put_count;
+	int ret;
+
+	mtx_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+
+	mtx_lock(&bdev->fence_lock);
+	(void) ttm_bo_wait(bo, false, false, true);
+	if (!ret && !bo->sync_obj) {
+		mtx_unlock(&bdev->fence_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+
+		mtx_unlock(&glob->lru_lock);
+		ttm_bo_cleanup_memtype_use(bo);
+
+		ttm_bo_list_ref_sub(bo, put_count, true);
+
+		return;
+	}
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	mtx_unlock(&bdev->fence_lock);
+
+	if (!ret) {
+		atomic_set(&bo->reserved, 0);
+		wakeup(bo);
+	}
+
+	refcount_acquire(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	mtx_unlock(&glob->lru_lock);
+
+	if (sync_obj) {
+		driver->sync_obj_flush(sync_obj);
+		driver->sync_obj_unref(&sync_obj);
+	}
+	taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+	    ((hz / 100) < 1) ? 1 : hz / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs_and_unlock
+ * If the bo is idle, remove it from the delayed and lru lists and unref it;
+ * if it is not idle, do nothing.
+ *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+					  bool interruptible,
+					  bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret;
+
+	mtx_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, true);
+
+	if (ret && !no_wait_gpu) {
+		void *sync_obj;
+
+		/*
+		 * Take a reference to the fence and unreserve,
+		 * at this point the buffer should be dead, so
+		 * no new sync objects can be attached.
+		 */
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		mtx_unlock(&bdev->fence_lock);
+
+		atomic_set(&bo->reserved, 0);
+		wakeup(bo);
+		mtx_unlock(&glob->lru_lock);
+
+		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+		driver->sync_obj_unref(&sync_obj);
+		if (ret)
+			return ret;
+
+		/*
+		 * Remove sync_obj with ttm_bo_wait; the wait should be
+		 * finished, and no new sync object should have been attached.
+		 */
+		mtx_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, false, true);
+		mtx_unlock(&bdev->fence_lock);
+		if (ret)
+			return ret;
+
+		mtx_lock(&glob->lru_lock);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+
+		/*
+		 * We raced and lost: someone else holds the reservation now
+		 * and is probably busy in ttm_bo_cleanup_memtype_use.
+		 *
+		 * Even if that is not the case, any delayed destruction
+		 * would now succeed because we finished waiting, so just
+		 * return success here.
+		 */
+		if (ret) {
+			mtx_unlock(&glob->lru_lock);
+			return 0;
+		}
+	} else
+		mtx_unlock(&bdev->fence_lock);
+
+	if (ret || unlikely(list_empty(&bo->ddestroy))) {
+		atomic_set(&bo->reserved, 0);
+		wakeup(bo);
+		mtx_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	mtx_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	return 0;
+}
+
+/**
+ * Traverse the delayed list and call ttm_bo_cleanup_refs_and_unlock on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
+
+	mtx_lock(&glob->lru_lock);
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
+
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	refcount_acquire(&entry->list_kref);
+
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
+			refcount_acquire(&nentry->list_kref);
+		}
+
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+		}
+
+		if (!ret)
+			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+							     !remove_all);
+		else
+			mtx_unlock(&glob->lru_lock);
+
+		if (refcount_release(&entry->list_kref))
+			ttm_bo_release_list(entry);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
+
+		mtx_lock(&glob->lru_lock);
+		if (list_empty(&entry->ddestroy))
+			break;
+	}
+
+out_unlock:
+	mtx_unlock(&glob->lru_lock);
+out:
+	if (entry && refcount_release(&entry->list_kref))
+		ttm_bo_release_list(entry);
+	return ret;
+}
+
+static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
+{
+	struct ttm_bo_device *bdev = arg;
+
+	if (ttm_bo_delayed_delete(bdev, false)) {
+		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+		    ((hz / 100) < 1) ? 1 : hz / 100);
+	}
+}
+
+static void ttm_bo_release(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	rw_wlock(&bdev->vm_lock);
+	if (likely(bo->vm_node != NULL)) {
+		RB_REMOVE(ttm_bo_device_buffer_objects,
+		    &bdev->addr_space_rb, bo);
+		drm_mm_put_block(bo->vm_node);
+		bo->vm_node = NULL;
+	}
+	rw_wunlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
+	ttm_bo_cleanup_refs_or_queue(bo);
+	if (refcount_release(&bo->list_kref))
+		ttm_bo_release_list(bo);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+	struct ttm_buffer_object *bo = *p_bo;
+
+	*p_bo = NULL;
+	if (refcount_release(&bo->kref))
+		ttm_bo_release(bo);
+}
+
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+	int pending;
+
+	taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
+	if (pending)
+		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+	return (pending);
+}
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+	if (resched) {
+		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+		    ((hz / 100) < 1) ? 1 : hz / 100);
+	}
+}
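+
+/*
+ * Sketch of the intended use of the pair above: quiesce the delayed-destroy
+ * task around an operation that must not race with it.
+ */
+#if 0
+	int resched;
+
+	resched = ttm_bo_lock_delayed_workqueue(bdev);
+	/* ... work that must not race with delayed destruction ... */
+	ttm_bo_unlock_delayed_workqueue(bdev, resched);
+#endif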
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_reg evict_mem;
+	struct ttm_placement placement;
+	int ret = 0;
+
+	mtx_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	mtx_unlock(&bdev->fence_lock);
+
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS) {
+			printf("[TTM] Failed to expire sync object before buffer eviction\n");
+		}
+		goto out;
+	}
+
+	MPASS(ttm_bo_is_reserved(bo));
+
+	evict_mem = bo->mem;
+	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
+
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+				no_wait_gpu);
+	if (ret) {
+		if (ret != -ERESTARTSYS) {
+			printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
+			       bo);
+			ttm_bo_mem_space_debug(bo, &placement);
+		}
+		goto out;
+	}
+
+	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+				     no_wait_gpu);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			printf("[TTM] Buffer eviction failed\n");
+		ttm_bo_mem_put(bo, &evict_mem);
+		goto out;
+	}
+	bo->evicted = true;
+out:
+	return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible,
+				bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_buffer_object *bo;
+	int ret = -EBUSY, put_count;
+
+	mtx_lock(&glob->lru_lock);
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		mtx_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	refcount_acquire(&bo->list_kref);
+
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
+		if (refcount_release(&bo->list_kref))
+			ttm_bo_release_list(bo);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	mtx_unlock(&glob->lru_lock);
+
+	MPASS(ret == 0);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
+	ttm_bo_unreserve(bo);
+
+	if (refcount_release(&bo->list_kref))
+		ttm_bo_release_list(bo);
+	return ret;
+}
+
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
+
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible,
+					bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	int ret;
+
+	do {
+		ret = (*man->func->get_node)(man, bo, placement, mem);
+		if (unlikely(ret != 0))
+			return ret;
+		if (mem->mm_node)
+			break;
+		ret = ttm_mem_evict_first(bdev, mem_type,
+					  interruptible, no_wait_gpu);
+		if (unlikely(ret != 0))
+			return ret;
+	} while (1);
+	if (mem->mm_node == NULL)
+		return -ENOMEM;
+	mem->mem_type = mem_type;
+	return 0;
+}
+
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+				      uint32_t cur_placement,
+				      uint32_t proposed_placement)
+{
+	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+	/**
+	 * Keep current caching if possible.
+	 */
+
+	if ((cur_placement & caching) != 0)
+		result |= (cur_placement & caching);
+	else if ((man->default_caching & caching) != 0)
+		result |= man->default_caching;
+	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+		result |= TTM_PL_FLAG_CACHED;
+	else if ((TTM_PL_FLAG_WC & caching) != 0)
+		result |= TTM_PL_FLAG_WC;
+	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+		result |= TTM_PL_FLAG_UNCACHED;
+
+	return result;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+				 uint32_t mem_type,
+				 uint32_t proposed_placement,
+				 uint32_t *masked_placement)
+{
+	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+		return false;
+
+	if ((proposed_placement & man->available_caching) == 0)
+		return false;
+
+	cur_flags |= (proposed_placement & man->available_caching);
+
+	*masked_placement = cur_flags;
+	return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver.  If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
+	uint32_t mem_type = TTM_PL_SYSTEM;
+	uint32_t cur_flags = 0;
+	bool type_found = false;
+	bool type_ok = false;
+	bool has_erestartsys = false;
+	int i, ret;
+
+	mem->mm_node = NULL;
+	for (i = 0; i < placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
+		man = &bdev->man[mem_type];
+
+		type_ok = ttm_bo_mt_compatible(man,
+						mem_type,
+						placement->placement[i],
+						&cur_flags);
+
+		if (!type_ok)
+			continue;
+
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
+
+		if (mem_type == TTM_PL_SYSTEM)
+			break;
+
+		if (man->has_type && man->use_type) {
+			type_found = true;
+			ret = (*man->func->get_node)(man, bo, placement, mem);
+			if (unlikely(ret))
+				return ret;
+		}
+		if (mem->mm_node)
+			break;
+	}
+
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
+		mem->mem_type = mem_type;
+		mem->placement = cur_flags;
+		return 0;
+	}
+
+	if (!type_found)
+		return -EINVAL;
+
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
+		man = &bdev->man[mem_type];
+		if (!man->has_type)
+			continue;
+		if (!ttm_bo_mt_compatible(man,
+						mem_type,
+						placement->busy_placement[i],
+						&cur_flags))
+			continue;
+
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+				~TTM_PL_MASK_MEMTYPE);
+
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
+			mem->placement = cur_flags;
+			mem->mm_node = NULL;
+			return 0;
+		}
+
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait_gpu);
+		if (ret == 0 && mem->mm_node) {
+			mem->placement = cur_flags;
+			return 0;
+		}
+		if (ret == -ERESTARTSYS)
+			has_erestartsys = true;
+	}
+	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+	return ret;
+}
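+
+/*
+ * The caller's side of the two-phase search above, as a sketch: the driver
+ * fills a ttm_placement with a preferred list and a wider busy list that
+ * ttm_bo_mem_space falls back to under memory pressure.  The flag choices
+ * below are illustrative only.
+ */
+#if 0
+static const uint32_t example_placements[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
+};
+static const uint32_t example_busy_placements[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
+	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+};
+static const struct ttm_placement example_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = example_placements,
+	.num_busy_placement = 2,
+	.busy_placement = example_busy_placements,
+};
+#endif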
+
+static
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	int ret = 0;
+	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	MPASS(ttm_bo_is_reserved(bo));
+
+	/*
+	 * FIXME: It's possible to pipeline buffer moves.
+	 * Have the driver move function wait for idle when necessary,
+	 * instead of doing it here.
+	 */
+	mtx_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	mtx_unlock(&bdev->fence_lock);
+	if (ret)
+		return ret;
+	mem.num_pages = bo->num_pages;
+	mem.size = mem.num_pages << PAGE_SHIFT;
+	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
+	/*
+	 * Determine where to move the buffer.
+	 */
+	ret = ttm_bo_mem_space(bo, placement, &mem,
+			       interruptible, no_wait_gpu);
+	if (ret)
+		goto out_unlock;
+	ret = ttm_bo_handle_move_mem(bo, &mem, false,
+				     interruptible, no_wait_gpu);
+out_unlock:
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
+	return ret;
+}
+
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
+			     struct ttm_mem_reg *mem)
+{
+	int i;
+
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
+		return -1;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		if ((placement->placement[i] & mem->placement &
+			TTM_PL_MASK_CACHING) &&
+			(placement->placement[i] & mem->placement &
+			TTM_PL_MASK_MEM))
+			return i;
+	}
+	return -1;
+}
+
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	int ret;
+
+	MPASS(ttm_bo_is_reserved(bo));
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
+	/*
+	 * Check whether we need to move buffer.
+	 */
+	ret = ttm_bo_mem_compat(placement, &bo->mem);
+	if (ret < 0) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible,
+					 no_wait_gpu);
+		if (ret)
+			return ret;
+	} else {
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the compatible memory placement flags into the
+		 * active flags.
+		 */
+		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+				~TTM_PL_MASK_MEMTYPE);
+	}
+	/*
+	 * We might need to add a TTM.
+	 */
+	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		ret = ttm_bo_add_ttm(bo, true);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
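+
+/*
+ * Sketch of a typical call sequence: reserve, validate into a placement
+ * (such as the example_placement sketched after ttm_bo_mem_space above),
+ * then unreserve.
+ */
+#if 0
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (ret == 0) {
+		ret = ttm_bo_validate(bo, &example_placement, true, false);
+		ttm_bo_unreserve(bo);
+	}
+#endif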
+
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	MPASS(!((placement->fpfn || placement->lpfn) &&
+	    (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));
+
+	return 0;
+}
+
+int ttm_bo_init(struct ttm_bo_device *bdev,
+		struct ttm_buffer_object *bo,
+		unsigned long size,
+		enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment,
+		bool interruptible,
+		struct vm_object *persistent_swap_storage,
+		size_t acc_size,
+		struct sg_table *sg,
+		void (*destroy) (struct ttm_buffer_object *))
+{
+	int ret = 0;
+	unsigned long num_pages;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+	if (ret) {
+		printf("[TTM] Out of kernel memory\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			free(bo, M_TTM_BO);
+		return -ENOMEM;
+	}
+
+	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (num_pages == 0) {
+		printf("[TTM] Illegal buffer object size\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			free(bo, M_TTM_BO);
+		ttm_mem_global_free(mem_glob, acc_size);
+		return -EINVAL;
+	}
+	bo->destroy = destroy;
+
+	refcount_init(&bo->kref, 1);
+	refcount_init(&bo->list_kref, 1);
+	atomic_set(&bo->cpu_writers, 0);
+	atomic_set(&bo->reserved, 1);
+	INIT_LIST_HEAD(&bo->lru);
+	INIT_LIST_HEAD(&bo->ddestroy);
+	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	bo->bdev = bdev;
+	bo->glob = bdev->glob;
+	bo->type = type;
+	bo->num_pages = num_pages;
+	bo->mem.size = num_pages << PAGE_SHIFT;
+	bo->mem.mem_type = TTM_PL_SYSTEM;
+	bo->mem.num_pages = bo->num_pages;
+	bo->mem.mm_node = NULL;
+	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
+	bo->priv_flags = 0;
+	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+	bo->seq_valid = false;
+	bo->persistent_swap_storage = persistent_swap_storage;
+	bo->acc_size = acc_size;
+	bo->sg = sg;
+	atomic_inc(&bo->glob->bo_count);
+
+	ret = ttm_bo_check_placement(bo, placement);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	/*
+	 * For ttm_bo_type_device and ttm_bo_type_sg buffers, allocate
+	 * address space from the device.
+	 */
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg) {
+		ret = ttm_bo_setup_vm(bo);
+		if (ret)
+			goto out_err;
+	}
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (ret)
+		goto out_err;
+
+	ttm_bo_unreserve(bo);
+	return 0;
+
+out_err:
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	return ret;
+}
+
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+		       unsigned long bo_size,
+		       unsigned struct_size)
+{
+	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+	size_t size = 0;
+
+	size += ttm_round_pot(struct_size);
+	size += PAGE_ALIGN(npages * sizeof(void *));
+	size += ttm_round_pot(sizeof(struct ttm_tt));
+	return size;
+}
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+			   unsigned long bo_size,
+			   unsigned struct_size)
+{
+	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+	size_t size = 0;
+
+	size += ttm_round_pot(struct_size);
+	size += PAGE_ALIGN(npages * sizeof(void *));
+	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+	return size;
+}
+
+int ttm_bo_create(struct ttm_bo_device *bdev,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			bool interruptible,
+			struct vm_object *persistent_swap_storage,
+			struct ttm_buffer_object **p_bo)
+{
+	struct ttm_buffer_object *bo;
+	size_t acc_size;
+	int ret;
+
+	bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
+	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+			  interruptible, persistent_swap_storage, acc_size,
+			  NULL, NULL);
+	if (likely(ret == 0))
+		*p_bo = bo;
+
+	return ret;
+}
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+					unsigned mem_type, bool allow_errors)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_bo_global *glob = bdev->glob;
+	int ret;
+
+	/*
+	 * Can't use standard list traversal since we're unlocking.
+	 */
+
+	mtx_lock(&glob->lru_lock);
+	while (!list_empty(&man->lru)) {
+		mtx_unlock(&glob->lru_lock);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printf("[TTM] Cleanup eviction failed\n");
+			}
+		}
+		mtx_lock(&glob->lru_lock);
+	}
+	mtx_unlock(&glob->lru_lock);
+	return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+	struct ttm_mem_type_manager *man;
+	int ret = -EINVAL;
+
+	if (mem_type >= TTM_NUM_MEM_TYPES) {
+		printf("[TTM] Illegal memory type %d\n", mem_type);
+		return ret;
+	}
+	man = &bdev->man[mem_type];
+
+	if (!man->has_type) {
+		printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
+		       mem_type);
+		return ret;
+	}
+
+	man->use_type = false;
+	man->has_type = false;
+
+	ret = 0;
+	if (mem_type > 0) {
+		ttm_bo_force_list_clean(bdev, mem_type, false);
+
+		ret = (*man->func->takedown)(man);
+	}
+
+	return ret;
+}
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+		printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!man->has_type) {
+		printf("[TTM] Memory type %u has not been initialized\n", mem_type);
+		return 0;
+	}
+
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
+}
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+			unsigned long p_size)
+{
+	int ret = -EINVAL;
+	struct ttm_mem_type_manager *man;
+
+	MPASS(type < TTM_NUM_MEM_TYPES);
+	man = &bdev->man[type];
+	MPASS(!man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	sx_init(&man->io_reserve_mutex, "ttmman");
+	INIT_LIST_HEAD(&man->io_reserve_lru);
+
+	ret = bdev->driver->init_mem_type(bdev, type, man);
+	if (ret)
+		return ret;
+	man->bdev = bdev;
+
+	ret = 0;
+	if (type != TTM_PL_SYSTEM) {
+		ret = (*man->func->init)(man, p_size);
+		if (ret)
+			return ret;
+	}
+	man->has_type = true;
+	man->use_type = true;
+	man->size = p_size;
+
+	INIT_LIST_HEAD(&man->lru);
+
+	return 0;
+}
+
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
+{
+
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	vm_page_free(glob->dummy_read_page);
+}
+
+void ttm_bo_global_release(struct drm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	if (refcount_release(&glob->kobj_ref))
+		ttm_bo_global_kobj_release(glob);
+}
+
+int ttm_bo_global_init(struct drm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+	int tries;
+
+	sx_init(&glob->device_list_mutex, "ttmdlm");
+	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
+	glob->mem_glob = bo_ref->mem_glob;
+	tries = 0;
+retry:
+	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
+	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
+	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		if (tries < 1) {
+			vm_pageout_grow_cache(tries, 0, VM_MAX_ADDRESS);
+			tries++;
+			goto retry;
+		}
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		printf("[TTM] Could not register buffer object swapout\n");
+		goto out_no_shrink;
+	}
+
+	atomic_set(&glob->bo_count, 0);
+
+	refcount_init(&glob->kobj_ref, 1);
+	return (0);
+
+out_no_shrink:
+	vm_page_free(glob->dummy_read_page);
+out_no_drp:
+	free(glob, M_DRM_GLOBAL);
+	return ret;
+}
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+	int ret = 0;
+	unsigned i = TTM_NUM_MEM_TYPES;
+	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;
+
+	while (i--) {
+		man = &bdev->man[i];
+		if (man->has_type) {
+			man->use_type = false;
+			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+				ret = -EBUSY;
+				printf("[TTM] DRM memory manager type %d is not clean\n",
+				       i);
+			}
+			man->has_type = false;
+		}
+	}
+
+	sx_xlock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	sx_xunlock(&glob->device_list_mutex);
+
+	if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
+		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+
+	while (ttm_bo_delayed_delete(bdev, true))
+		;
+
+	mtx_lock(&glob->lru_lock);
+	if (list_empty(&bdev->ddestroy))
+		TTM_DEBUG("Delayed destroy list was clean\n");
+
+	if (list_empty(&bdev->man[0].lru))
+		TTM_DEBUG("Swap list was clean\n");
+	mtx_unlock(&glob->lru_lock);
+
+	MPASS(drm_mm_clean(&bdev->addr_space_mm));
+	rw_wlock(&bdev->vm_lock);
+	drm_mm_takedown(&bdev->addr_space_mm);
+	rw_wunlock(&bdev->vm_lock);
+
+	return ret;
+}
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset,
+		       bool need_dma32)
+{
+	int ret = -EINVAL;
+
+	rw_init(&bdev->vm_lock, "ttmvml");
+	bdev->driver = driver;
+
+	memset(bdev->man, 0, sizeof(bdev->man));
+
+	/*
+	 * Initialize the system memory buffer type.
+	 * Other types need to be driver / IOCTL initialized.
+	 */
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
+	if (unlikely(ret != 0))
+		goto out_no_sys;
+
+	RB_INIT(&bdev->addr_space_rb);
+	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+	if (unlikely(ret != 0))
+		goto out_no_addr_mm;
+
+	TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
+	    ttm_bo_delayed_workqueue, bdev);
+	INIT_LIST_HEAD(&bdev->ddestroy);
+	bdev->dev_mapping = NULL;
+	bdev->glob = glob;
+	bdev->need_dma32 = need_dma32;
+	bdev->val_seq = 0;
+	mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
+	sx_xlock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	sx_xunlock(&glob->device_list_mutex);
+
+	return 0;
+out_no_addr_mm:
+	ttm_bo_clean_mm(bdev, 0);
+out_no_sys:
+	return ret;
+}
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+		if (mem->mem_type == TTM_PL_SYSTEM)
+			return false;
+
+		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+			return false;
+
+		if (mem->placement & TTM_PL_FLAG_CACHED)
+			return false;
+	}
+	return true;
+}
+
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+
+	ttm_bo_release_mmap(bo);
+	ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	/* The caller acquired bdev->vm_lock. */
+	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This applies to
+ * ttm_bo_type_device and ttm_bo_type_sg objects; other types are
+ * not placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret;
+
+retry_pre_get:
+	ret = drm_mm_pre_get(&bdev->addr_space_mm);
+	if (unlikely(ret != 0))
+		return ret;
+
+	rw_wlock(&bdev->vm_lock);
+	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+					 bo->mem.num_pages, 0, 0);
+
+	if (unlikely(bo->vm_node == NULL)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+					      bo->mem.num_pages, 0);
+
+	if (unlikely(bo->vm_node == NULL)) {
+		rw_wunlock(&bdev->vm_lock);
+		goto retry_pre_get;
+	}
+
+	ttm_bo_vm_insert_rb(bo);
+	rw_wunlock(&bdev->vm_lock);
+	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+	return 0;
+out_unlock:
+	rw_wunlock(&bdev->vm_lock);
+	return ret;
+}
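+
+/*
+ * The resulting bo->addr_space_offset is what user space passes as the
+ * mmap() offset; a sketch of the user-space side, where drm_fd is a
+ * hypothetical open drm device descriptor:
+ */
+#if 0
+	void *p = mmap(NULL, bo->num_pages << PAGE_SHIFT,
+	    PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd,
+	    bo->addr_space_offset);
+#endif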
+
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+		bool lazy, bool interruptible, bool no_wait)
+{
+	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
+	void *sync_obj;
+	int ret = 0;
+
+	if (likely(bo->sync_obj == NULL))
+		return 0;
+
+	while (bo->sync_obj) {
+
+		if (driver->sync_obj_signaled(bo->sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+			mtx_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&tmp_obj);
+			mtx_lock(&bdev->fence_lock);
+			continue;
+		}
+
+		if (no_wait)
+			return -EBUSY;
+
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		mtx_unlock(&bdev->fence_lock);
+		ret = driver->sync_obj_wait(sync_obj,
+					    lazy, interruptible);
+		if (unlikely(ret != 0)) {
+			driver->sync_obj_unref(&sync_obj);
+			mtx_lock(&bdev->fence_lock);
+			return ret;
+		}
+		mtx_lock(&bdev->fence_lock);
+		if (likely(bo->sync_obj == sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			mtx_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			mtx_lock(&bdev->fence_lock);
+		} else {
+			mtx_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			mtx_lock(&bdev->fence_lock);
+		}
+	}
+	return 0;
+}
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	/*
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
+	 */
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+	if (unlikely(ret != 0))
+		return ret;
+	mtx_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, true, no_wait);
+	mtx_unlock(&bdev->fence_lock);
+	if (likely(ret == 0))
+		atomic_inc(&bo->cpu_writers);
+	ttm_bo_unreserve(bo);
+	return ret;
+}
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+	atomic_dec(&bo->cpu_writers);
+}
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
+	struct ttm_buffer_object *bo;
+	int ret = -EBUSY;
+	int put_count;
+	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+	mtx_lock(&glob->lru_lock);
+	list_for_each_entry(bo, &glob->swap_lru, swap) {
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		mtx_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	refcount_acquire(&bo->list_kref);
+
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+		if (refcount_release(&bo->list_kref))
+			ttm_bo_release_list(bo);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	mtx_unlock(&glob->lru_lock);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	/**
+	 * Wait for GPU, then move to system cached.
+	 */
+
+	mtx_lock(&bo->bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, false);
+	mtx_unlock(&bo->bdev->fence_lock);
+
+	if (unlikely(ret != 0))
+		goto out;
+
+	if ((bo->mem.placement & swap_placement) != swap_placement) {
+		struct ttm_mem_reg evict_mem;
+
+		evict_mem = bo->mem;
+		evict_mem.mm_node = NULL;
+		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+		evict_mem.mem_type = TTM_PL_SYSTEM;
+
+		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+					     false, false);
+		if (unlikely(ret != 0))
+			goto out;
+	}
+
+	ttm_bo_unmap_virtual(bo);
+
+	/**
+	 * Swap out. Buffer will be swapped in again as soon as
+	 * anyone tries to access a ttm page.
+	 */
+
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
+	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+out:
+
+	/**
+	 * Unreserve without putting on LRU to avoid swapping out an
+	 * already swapped buffer.
+	 */
+
+	atomic_set(&bo->reserved, 0);
+	wakeup(bo);
+	if (refcount_release(&bo->list_kref))
+		ttm_bo_release_list(bo);
+	return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+		;
+}


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_bo_api.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo_api.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo_api.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,741 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo_api.h 247835 2013-03-05 09:49:34Z kib $ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <dev/drm2/drmP.h>
+
+struct ttm_bo_device;
+
+struct drm_mm_node;
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn:		first valid page frame number to put the object
+ * @lpfn:		last valid page frame number to put the object
+ * @num_placement:	number of preferred placements
+ * @placement:		preferred placements
+ * @num_busy_placement:	number of preferred placements when the buffer needs to be evicted
+ * @busy_placement:	preferred placements when the buffer needs to be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+	unsigned	fpfn;
+	unsigned	lpfn;
+	unsigned	num_placement;
+	const uint32_t	*placement;
+	unsigned	num_busy_placement;
+	const uint32_t	*busy_placement;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr:		mapped virtual address
+ * @base:		bus base address
+ * @is_iomem:		is this io memory?
+ * @size:		size in bytes
+ * @offset:		offset from the base address
+ * @io_reserved_vm:     The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count:  Refcounting the number of callers to ttm_mem_io_reserve
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+	void		*addr;
+	unsigned long	base;
+	unsigned long	size;
+	unsigned long	offset;
+	bool		is_iomem;
+	bool		io_reserved_vm;
+	uint64_t        io_reserved_count;
+};
+
+
+/**
+ * struct ttm_mem_reg
+ *
+ * @mm_node: Memory manager node.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+
+struct ttm_mem_reg {
+	void *mm_node;
+	unsigned long start;
+	unsigned long size;
+	unsigned long num_pages;
+	uint32_t page_alignment;
+	uint32_t mem_type;
+	uint32_t placement;
+	struct ttm_bus_placement bus;
+};
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device:	These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupies a slot in the
+ * device address space, that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+
+enum ttm_bo_type {
+	ttm_bo_type_device,
+	ttm_bo_type_kernel,
+	ttm_bo_type_sg
+};
+
+struct ttm_tt;
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @num_pages: Actual number of pages.
+ * @addr_space_offset: Address space offset.
+ * @acc_size: Accounted size for this object.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is put on the delayed delete list.
+ * @list_kref: List reference count of this buffer object. This member is
+ * used to avoid destruction while the buffer object is still on a list.
+ * Each lru list may keep one refcount, the delayed delete list keeps one,
+ * and kref != 0 keeps one refcount. When this refcount reaches zero,
+ * the object is destroyed.
+ * @event_queue: Queue for processes waiting on buffer object status change.
+ * @mem: structure describing current placement.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object.
+ * @ttm: TTM structure holding system pages.
+ * @evicted: Whether the object was evicted without user-space knowing.
+ * @cpu_writes: For synchronization. Number of cpu writers.
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+ * @val_seq: Sequence of the validation holding the @reserved lock.
+ * Used to avoid starvation when many processes compete to validate the
+ * buffer. This member is protected by the bo_device::lru_lock.
+ * @seq_valid: The value of @val_seq is valid. This value is protected by
+ * the bo_device::lru_lock.
+ * @reserved: Deadlock-free lock used for synchronization state transitions.
+ * @sync_obj: Pointer to a synchronization object.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vm_rb: Rb node for the vm rb tree.
+ * @vm_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member and the API visibility of this object make it
+ * possible to derive driver-specific types.
+ */
+
+struct ttm_buffer_object {
+	/**
+	 * Members constant at init.
+	 */
+
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	enum ttm_bo_type type;
+	void (*destroy) (struct ttm_buffer_object *);
+	unsigned long num_pages;
+	uint64_t addr_space_offset;
+	size_t acc_size;
+
+	/**
+	 * Members not needing protection.
+	 */
+
+	u_int kref;
+	u_int list_kref;
+	/* wait_queue_head_t event_queue; */
+
+	/**
+	 * Members protected by the bo::reserved lock.
+	 */
+
+	struct ttm_mem_reg mem;
+	struct vm_object *persistent_swap_storage;
+	struct ttm_tt *ttm;
+	bool evicted;
+
+	/**
+	 * Members protected by the bo::reserved lock only when written to.
+	 */
+
+	atomic_t cpu_writers;
+
+	/**
+	 * Members protected by the bdev::lru_lock.
+	 */
+
+	struct list_head lru;
+	struct list_head ddestroy;
+	struct list_head swap;
+	struct list_head io_reserve_lru;
+	uint32_t val_seq;
+	bool seq_valid;
+
+	/**
+	 * Members protected by the bdev::lru_lock
+	 * only when written to.
+	 */
+
+	atomic_t reserved;
+
+	/**
+	 * Members protected by struct buffer_object_device::fence_lock
+	 * In addition, setting sync_obj to anything else
+	 * than NULL requires bo::reserved to be held. This allows for
+	 * checking NULL while reserved but not holding the mentioned lock.
+	 */
+
+	void *sync_obj;
+	unsigned long priv_flags;
+
+	/**
+	 * Members protected by the bdev::vm_lock
+	 */
+
+	RB_ENTRY(ttm_buffer_object) vm_rb;
+	struct drm_mm_node *vm_node;
+
+
+	 * Special members that are protected by the reserve lock
+	 * and the bo::lock when written to. Can be read with
+	 * either of these locks held.
+	 */
+
+	unsigned long offset;
+	uint32_t cur_placement;
+
+	struct sg_table *sg;
+};
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+struct ttm_bo_kmap_obj {
+	void *virtual;
+	struct vm_page *page;
+	struct sf_buf *sf;
+	int num_pages;
+	unsigned long size;
+	enum {
+		ttm_bo_map_iomap        = 1 | TTM_BO_MAP_IOMEM_MASK,
+		ttm_bo_map_vmap         = 2,
+		ttm_bo_map_kmap         = 3,
+		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
+	} bo_kmap_type;
+	struct ttm_buffer_object *bo;
+};
+
+/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ *
+ * Returns a refcounted pointer to a buffer object.
+ */
+
+static inline struct ttm_buffer_object *
+ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+	refcount_acquire(&bo->kref);
+	return bo;
+}
+
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo:  The buffer object.
+ * @interruptible:  Use interruptible wait.
+ * @no_wait:  Return immediately if buffer is busy.
+ *
+ * This function must be called with the bo::mutex held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+		       bool interruptible, bool no_wait);
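+
+/*
+ * In this port the wait runs under bdev->fence_lock; a minimal idle check,
+ * mirroring the callers in ttm_bo.c, as a sketch:
+ */
+#if 0
+	mtx_lock(&bo->bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, true, false);
+	mtx_unlock(&bo->bdev->fence_lock);
+#endif
+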
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement,
+				bool interruptible,
+				bool no_wait_gpu);
+
+/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference and clear a pointer to a buffer object.
+ */
+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref.
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+				bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * True if the workqueue was queued at the time.
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+					    int resched);
+
+/**
+ * ttm_bo_synccpu_write_grab
+ *
+ * @bo: The buffer object.
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * Synchronizes a buffer object for CPU RW access. This means
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
+ * Returns
+ * -EBUSY if the buffer is busy and no_wait is true.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int
+ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_synccpu_write_release:
+ *
+ * @bo : The buffer object.
+ *
+ * Releases a synccpu lock.
+ */
+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
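
A sketch of the grab/release bracket around CPU writes, assuming @bo is
otherwise valid:

	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, false);	/* may sleep */
	if (ret == 0) {
		/* CPU writes are safe; GPU submissions now get -EBUSY. */
		/* ... fill the buffer ... */
		ttm_bo_synccpu_write_release(bo);
	}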
+
+/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding buffer object data.
+ *
+ * Returns the size to account for a buffer object.
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+		       unsigned long bo_size,
+		       unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+			   unsigned long bo_size,
+			   unsigned struct_size);
+
+/**
+ * ttm_bo_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for the buffer object.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+			struct ttm_buffer_object *bo,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			bool interruptible,
+			struct vm_object *persistent_swap_storage,
+			size_t acc_size,
+			struct sg_table *sg,
+			void (*destroy) (struct ttm_buffer_object *));
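
A sketch of the embedding pattern this comment describes; drv_bo,
drv_bo_create and the M_DRV malloc type are hypothetical driver names,
and ttm_bo_type_device is the enumerator from earlier in this header:

	struct drv_bo {
		struct ttm_buffer_object base;
		int			 drv_cookie;
	};

	static void
	drv_bo_destroy(struct ttm_buffer_object *bo)
	{
		struct drv_bo *dbo = (struct drv_bo *)bo;

		free(dbo, M_DRV);
	}

	static int
	drv_bo_create(struct ttm_bo_device *bdev, unsigned long size,
	    struct ttm_placement *placement, struct drv_bo **out)
	{
		struct drv_bo *dbo;
		size_t acc_size;
		int ret;

		dbo = malloc(sizeof(*dbo), M_DRV, M_WAITOK | M_ZERO);
		acc_size = ttm_bo_acc_size(bdev, size, sizeof(*dbo));
		ret = ttm_bo_init(bdev, &dbo->base, size, ttm_bo_type_device,
		    placement, 0, false, NULL, acc_size, NULL, drv_bo_destroy);
		if (ret == 0)
			*out = dbo;
		/* On failure ttm_bo_init() has already run drv_bo_destroy(). */
		return (ret);
	}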
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for the buffer object.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+ */
+
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+				unsigned long size,
+				enum ttm_bo_type type,
+				struct ttm_placement *placement,
+				uint32_t page_alignment,
+				bool interruptible,
+				struct vm_object *persistent_swap_storage,
+				struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_check_placement
+ *
+ * @bo:		the buffer object.
+ * @placement:	placements
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_size: size of the managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+
+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+				unsigned long p_size);
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should have been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+
+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+
+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return indicates whether the
+ * virtual map is io memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io memory
+ * area that should only be accessed using iowriteXX() and similar functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+					 bool *is_iomem)
+{
+	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+	return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
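
A sketch of the full map cycle, assuming the buffer's page count is
available as bo->num_pages:

	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *va;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret == 0) {
		va = ttm_kmap_obj_virtual(&map, &is_iomem);
		/* If is_iomem, use io accessors rather than plain loads. */
		/* ... access the buffer through va ... */
		ttm_bo_kunmap(&map);
	}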
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma:       vma as input from the fbdev mmap method.
+ * @bo:        The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+/* XXXKIB
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+			  struct ttm_buffer_object *bo);
+*/
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp:      filp as input from the mmap method.
+ * @vma:       vma as input from the mmap method.
+ * @bdev:      Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method
+ * if the device address space is to be backed by the bo manager.
+ */
+/* XXXKIB
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		       struct ttm_bo_device *bdev);
+*/
+/**
+ * ttm_bo_io
+ *
+ * @bdev:      Pointer to the struct ttm_bo_device.
+ * @filp:      Pointer to the struct file attempting to read / write.
+ * @wbuf:      User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf:      User-space pointer to address of buffer to read into.
+ * Null on write.
+ * @count:     Number of bytes to read / write.
+ * @f_pos:     Pointer to current file position.
+ * @write:     1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man(2) write, man(2) read. In particular, the function may return
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+			 const char *wbuf, char *rbuf,
+			 size_t count, off_t *f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo:     The buffer object to check.
+ *
+ * This function returns an indication of whether a bo is reserved, and should
+ * only be used to print an error when it unexpectedly is not, e.g. after
+ * incorrect API usage, since there's no guarantee that the caller is the one
+ * holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+	return atomic_read(&bo->reserved);
+}
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo_api.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_bo_driver.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo_driver.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo_driver.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1025 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo_driver.h 275408 2014-12-02 14:09:54Z tijl $ */
+
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_api.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/drm_global.h>
+#include <sys/rwlock.h>
+#include <sys/tree.h>
+
+struct ttm_backend_func {
+	/**
+	 * struct ttm_backend_func member bind
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+	 * memory type and location for binding.
+	 *
+	 * Bind the backend pages into the aperture in the location
+	 * indicated by @bo_mem. This function should be able to handle
+	 * differences between aperture and system page sizes.
+	 */
+	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+	/**
+	 * struct ttm_backend_func member unbind
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Unbind previously bound backend pages. This function should be
+	 * able to handle differences between aperture and system page sizes.
+	 */
+	int (*unbind) (struct ttm_tt *ttm);
+
+	/**
+	 * struct ttm_backend_func member destroy
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Destroy the backend. This will be called back from ttm_tt_destroy,
+	 * so don't call ttm_tt_destroy from the callback, or an infinite
+	 * loop will result.
+	 */
+	void (*destroy) (struct ttm_tt *ttm);
+};
+
+#define TTM_PAGE_FLAG_WRITE           (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
+#define TTM_PAGE_FLAG_DMA32           (1 << 7)
+#define TTM_PAGE_FLAG_SG              (1 << 8)
+
+enum ttm_caching_state {
+	tt_uncached,
+	tt_wc,
+	tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @num_pages: Number of pages in the page array.
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @be: Pointer to the ttm backend.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+	struct ttm_bo_device *bdev;
+	struct ttm_backend_func *func;
+	struct vm_page *dummy_read_page;
+	struct vm_page **pages;
+	uint32_t page_flags;
+	unsigned long num_pages;
+	struct sg_table *sg; /* for SG objects via dma-buf */
+	struct ttm_bo_global *glob;
+	struct vm_object *swap_storage;
+	enum ttm_caching_state caching_state;
+	enum {
+		tt_bound,
+		tt_unbound,
+		tt_unpopulated,
+	} state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages.
+ * @pages_list: Used by some page allocation backends.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+	struct ttm_tt ttm;
+	dma_addr_t *dma_address;
+	struct list_head pages_list;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+	/**
+	 * struct ttm_mem_type_manager member init
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @p_size: Implementation dependent, but typically the size of the
+	 * range to be managed in pages.
+	 *
+	 * Called to initialize a private range manager. The function is
+	 * expected to initialize the man::priv member.
+	 * Returns 0 on success, negative error code on failure.
+	 */
+	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+	/**
+	 * struct ttm_mem_type_manager member takedown
+	 *
+	 * @man: Pointer to a memory type manager.
+	 *
+	 * Called to undo the setup done in init. All allocated resources
+	 * should be freed.
+	 */
+	int  (*takedown)(struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_mem_type_manager member get_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @bo: Pointer to the buffer object we're allocating space for.
+	 * @placement: Placement details.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function should allocate space in the memory type managed
+	 * by @man. Placement details if
+	 * applicable are given by @placement. If successful,
+	 * @mem::mm_node should be set to a non-null value, and
+	 * @mem::start should be set to a value identifying the beginning
+	 * of the range allocated, and the function should return zero.
+	 * If the memory region could not accommodate the buffer object,
+	 * @mem::mm_node should be set to NULL, and the function should
+	 * return 0.
+	 * If a system error occurred, preventing the request from being
+	 * fulfilled, the function should return a negative error code.
+	 *
+	 * Note that @mem::mm_node will only be dereferenced by
+	 * struct ttm_mem_type_manager functions and optionally by the driver,
+	 * which has knowledge of the underlying type.
+	 *
+	 * This function may not be called from within atomic context, so
+	 * an implementation can and must use either a mutex or a spinlock to
+	 * protect any data structures managing the space.
+	 */
+	int  (*get_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member put_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function frees memory type resources previously allocated
+	 * and that are identified by @mem::mm_node and @mem::start. May not
+	 * be called from within atomic context.
+	 */
+	void (*put_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member debug
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @prefix: Prefix to be used in printout to identify the caller.
+	 *
+	 * This function is called to print out the state of the memory
+	 * type manager to aid debugging of out-of-memory conditions.
+	 * It may not be called from within atomic context.
+	 */
+	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above.
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
+struct ttm_mem_type_manager {
+	struct ttm_bo_device *bdev;
+
+	/*
+	 * No protection. Constant from start.
+	 */
+
+	bool has_type;
+	bool use_type;
+	uint32_t flags;
+	unsigned long gpu_offset;
+	uint64_t size;
+	uint32_t available_caching;
+	uint32_t default_caching;
+	const struct ttm_mem_type_manager_func *func;
+	void *priv;
+	struct sx io_reserve_mutex;
+	bool use_io_reserve_lru;
+	bool io_reserve_fastpath;
+
+	/*
+	 * Protected by @io_reserve_mutex:
+	 */
+
+	struct list_head io_reserve_lru;
+
+	/*
+	 * Protected by the global->lru_lock.
+	 */
+
+	struct list_head lru;
+};
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+struct ttm_bo_driver {
+	/**
+	 * ttm_tt_create
+	 *
+	 * @bdev: pointer to a struct ttm_bo_device.
+	 * @size: Size of the data that needs backing.
+	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+	 * @dummy_read_page: See struct ttm_bo_device.
+	 *
+	 * Create a struct ttm_tt to back data with system memory pages.
+	 * No pages are actually allocated.
+	 * Returns:
+	 * NULL: Out of memory.
+	 */
+	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+					unsigned long size,
+					uint32_t page_flags,
+					struct vm_page *dummy_read_page);
+
+	/**
+	 * ttm_tt_populate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Allocate all backing pages.
+	 * Returns:
+	 * -ENOMEM: Out of memory.
+	 */
+	int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_unpopulate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Free all backing pages.
+	 */
+	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
+	/**
+	 * struct ttm_bo_driver member invalidate_caches
+	 *
+	 * @bdev: the buffer object device.
+	 * @flags: new placement of the rebound buffer object.
+	 *
+	 * A previously evicted buffer has been rebound in a
+	 * potentially new location. Tell the driver that it might
+	 * consider invalidating read (texture) caches on the next command
+	 * submission as a consequence.
+	 */
+
+	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+			      struct ttm_mem_type_manager *man);
+	/**
+	 * struct ttm_bo_driver member evict_flags:
+	 *
+	 * @bo: the buffer object to be evicted
+	 *
+	 * Return the bo flags for a buffer which is not mapped to the hardware.
+	 * These will be placed in proposed_flags so that when the move is
+	 * finished, they'll end up in bo->mem.flags
+	 */
+
+	 void(*evict_flags) (struct ttm_buffer_object *bo,
+				struct ttm_placement *placement);
+	/**
+	 * struct ttm_bo_driver member move:
+	 *
+	 * @bo: the buffer to move
+	 * @evict: whether this motion is evicting the buffer from
+	 * the graphics address space
+	 * @interruptible: Use interruptible sleeps if possible when sleeping.
+	 * @no_wait_gpu: whether this should give up and return -EBUSY
+	 * if this move would require sleeping
+	 * @new_mem: the new memory region receiving the buffer
+	 *
+	 * Move a buffer between two memory regions.
+	 */
+	int (*move) (struct ttm_buffer_object *bo,
+		     bool evict, bool interruptible,
+		     bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem);
+
+	/**
+	 * struct ttm_bo_driver_member verify_access
+	 *
+	 * @bo: Pointer to a buffer object.
+	 * @filp: Pointer to a struct file trying to access the object.
+	 * FreeBSD: use devfs_get_cdevpriv etc.
+	 *
+	 * Called from the map / write / read methods to verify that the
+	 * caller is permitted to access the buffer object.
+	 * This member may be set to NULL, which will refuse this kind of
+	 * access for all buffer objects.
+	 * This function should return 0 if access is granted, -EPERM otherwise.
+	 */
+	int (*verify_access) (struct ttm_buffer_object *bo);
+
+	/**
+	 * In case a driver writer dislikes the TTM fence objects,
+	 * the driver writer can replace those with sync objects of
+	 * his / her own. If it turns out that no driver writer is
+	 * using these, I suggest we remove these hooks and plug in
+	 * fences directly. The bo driver needs the following functionality:
+	 * See the corresponding functions in the fence object API
+	 * documentation.
+	 */
+
+	bool (*sync_obj_signaled) (void *sync_obj);
+	int (*sync_obj_wait) (void *sync_obj,
+			      bool lazy, bool interruptible);
+	int (*sync_obj_flush) (void *sync_obj);
+	void (*sync_obj_unref) (void **sync_obj);
+	void *(*sync_obj_ref) (void *sync_obj);
+
+	/* hook to notify driver about a driver move so it
+	 * can do tiling things */
+	void (*move_notify)(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *new_mem);
+	/* notify the driver we are taking a fault on this BO
+	 * and have reserved it */
+	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+	/**
+	 * notify the driver that we're about to swap out this bo
+	 */
+	void (*swap_notify) (struct ttm_buffer_object *bo);
+
+	/**
+	 * Driver callback for when io memory is mapped (for bo_move_memcpy,
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
+	 * calls are balanced.
+	 */
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+};
+
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+
+struct ttm_bo_global_ref {
+	struct drm_global_reference ref;
+	struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_bo_global - Buffer object driver global data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+
+struct ttm_bo_global {
+	u_int kobj_ref;
+
+	/**
+	 * Constant after init.
+	 */
+
+	struct ttm_mem_global *mem_glob;
+	struct vm_page *dummy_read_page;
+	struct ttm_mem_shrink shrink;
+	struct sx device_list_mutex;
+	struct mtx lru_lock;
+
+	/**
+	 * Protected by device_list_mutex.
+	 */
+	struct list_head device_list;
+
+	/**
+	 * Protected by the lru_lock.
+	 */
+	struct list_head swap_lru;
+
+	/**
+	 * Internal protection.
+	 */
+	atomic_t bo_count;
+};
+
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
+					   idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
+ * @addr_space_mm: Range manager for the device address space.
+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+ * @val_seq: Current validation sequence.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+	/*
+	 * Constant after bo device init / atomic.
+	 */
+	struct list_head device_list;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_driver *driver;
+	struct rwlock vm_lock;
+	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	struct mtx fence_lock;
+	/*
+	 * Protected by the vm lock.
+	 */
+	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
+	struct drm_mm addr_space_mm;
+
+	/*
+	 * Protected by the global:lru lock.
+	 */
+	struct list_head ddestroy;
+	uint32_t val_seq;
+
+	/*
+	 * Protected by load / firstopen / lastclose / unload sync.
+	 */
+
+	struct address_space *dev_mapping;
+
+	/*
+	 * Internal protection.
+	 */
+
+	struct timeout_task wq;
+
+	bool need_dma32;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+	*old ^= (*old ^ new) & mask;
+	return *old;
+}
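
A worked example: only the bits selected by the mask change, everything
else keeps its old value:

	uint32_t cur = 0x5;			/* 0b0101 */

	ttm_flag_masked(&cur, 0x2, 0x3);	/* low two bits := 0b10 */
	/* cur == 0x6 (0b0110): bit 2 untouched, bits 1..0 replaced. */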
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bdev: pointer to a struct ttm_bo_device.
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+			unsigned long size, uint32_t page_flags,
+			struct vm_page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+			   unsigned long size, uint32_t page_flags,
+			   struct vm_page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem.
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct vm_page to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined, all CPU caches will first be flushed to make sure the
+ * data of the pages hit RAM. This function may be very costly as it involves
+ * global TLB and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+			  struct vm_object *persistent_swap_storage);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+				   struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @interruptible: Sleep interruptible while waiting.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				bool interruptible,
+				bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+
+extern void ttm_bo_global_release(struct drm_global_reference *ref);
+extern int ttm_bo_global_init(struct drm_global_reference *ref);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device.
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+			      struct ttm_bo_global *glob,
+			      struct ttm_bo_driver *driver,
+			      uint64_t file_page_offset, bool need_dma32);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation), and removes it from lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * Processes attempting to reserve multiple buffers other than for eviction,
+ * (typically execbuf), should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_sequence == 1 and @sequence == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EAGAIN error,
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+			  bool interruptible,
+			  bool no_wait, bool use_sequence, uint32_t sequence);
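
A sketch of the documented error handling (seq is the caller's unique
validation sequence number; the backoff itself is elided):

	int ret;

	ret = ttm_bo_reserve(bo, true, false, true, seq);
	switch (ret) {
	case 0:
		/* Reserved: validate, then ttm_bo_unreserve(bo). */
		break;
	case -EAGAIN:
		/*
		 * Possible deadlock: release every reservation we hold,
		 * then retry with the same seq, e.g. through
		 * ttm_bo_reserve_slowpath(bo, true, seq).
		 */
		break;
	case -ERESTARTSYS:
		/* Interrupted by a signal: unwind to user-space. */
		break;
	}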
+
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+					 bool interruptible,
+					 uint32_t sequence);
+
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+				   bool interruptible, uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_nolru:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+				 bool interruptible,
+				 bool no_wait, bool use_sequence,
+				 uint32_t sequence);
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/*
+ * ttm_bo_util.c
+ */
+
+/**
+ * ttm_bo_move_ttm
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Optimized move function for a buffer object with both old and
+ * new placement backed by a TTM. The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+			   bool evict, bool no_wait_gpu,
+			   struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+			      bool evict, bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_free_old_node
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Utility function to free an old placement after a successful move.
+ */
+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @sync_obj: A sync object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+				     void *sync_obj,
+				     bool evict, bool no_wait_gpu,
+				     struct ttm_mem_reg *new_mem);
+/**
+ * ttm_io_prot
+ *
+ * @caching_flags: Caching flags, as used by the placement mechanism.
+ *
+ * Utility function that returns the vm_memattr_t that should be used for
+ * setting up a PTE with the caching model indicated by @caching_flags.
+ */
+extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);
+
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
+#if __OS_HAS_AGP
+#define TTM_HAS_AGP
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+					device_t bridge,
+					unsigned long size, uint32_t page_flags,
+					struct vm_page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+int	ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+	    struct ttm_buffer_object *b);
+
+RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+    ttm_bo_cmp_rb_tree_items);
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo_driver.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_bo_manager.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo_manager.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo_manager.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,158 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo_manager.c 247835 2013-03-05 09:49:34Z kib $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/drm_mm.h>
+
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	struct mtx lock;
+};
+
+MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		mtx_lock(&rman->lock);
+		node = drm_mm_search_free_in_range(mm,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			mtx_unlock(&rman->lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		mtx_unlock(&rman->lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+	if (mem->mm_node) {
+		mtx_lock(&rman->lock);
+		drm_mm_put_block(mem->mm_node);
+		mtx_unlock(&rman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct ttm_range_manager *rman;
+	int ret;
+
+	rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
+	if (ret) {
+		free(rman, M_TTM_RMAN);
+		return ret;
+	}
+
+	mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
+	man->priv = rman;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+
+	mtx_lock(&rman->lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		mtx_unlock(&rman->lock);
+		mtx_destroy(&rman->lock);
+		free(rman, M_TTM_RMAN);
+		man->priv = NULL;
+		return 0;
+	}
+	mtx_unlock(&rman->lock);
+	return -EBUSY;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+	mtx_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	mtx_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	.init = ttm_bo_man_init,
+	.takedown = ttm_bo_man_takedown,
+	.get_node = ttm_bo_man_get_node,
+	.put_node = ttm_bo_man_put_node,
+	.debug = ttm_bo_man_debug
+};
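
A sketch of how a driver would select this range manager from its
init_mem_type hook, assuming the TTM_PL_* constants from ttm_placement.h:

	static int
	drv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
	    struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			man->func = &ttm_bo_manager_func;
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
			    TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
			    TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			/* ttm_bo_init_mm(bdev, type, pages) invokes init. */
			break;
		default:
			return (-EINVAL);
		}
		return (0);
	}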


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo_manager.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_bo_util.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo_util.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo_util.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,668 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo_util.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <sys/sf_buf.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+	ttm_bo_mem_put(bo, &bo->mem);
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+		    bool evict,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+
+	if (old_mem->mem_type != TTM_PL_SYSTEM) {
+		ttm_tt_unbind(ttm);
+		ttm_bo_free_old_node(bo);
+		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+				TTM_PL_MASK_MEM);
+		old_mem->mem_type = TTM_PL_SYSTEM;
+	}
+
+	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (new_mem->mem_type != TTM_PL_SYSTEM) {
+		ret = ttm_tt_bind(ttm, new_mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}
+
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+{
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible) {
+		if (sx_xlock_sig(&man->io_reserve_mutex))
+			return (-EINTR);
+		else
+			return (0);
+	}
+
+	sx_xlock(&man->io_reserve_mutex);
+	return 0;
+}
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	sx_xunlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
+	}
+	return 0;
+}
+
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
+	}
+}
+
+static
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			void **virtual)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
+	if (ret || !mem->bus.is_iomem)
+		return ret;
+
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
+		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
+		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
+		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
+			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
+			return -ENOMEM;
+		}
+	}
+	*virtual = addr;
+	return 0;
+}
+
+static
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			 void *virtual)
+{
+	struct ttm_mem_type_manager *man;
+
+	man = &bdev->man[mem->mem_type];
+
+	if (virtual && mem->bus.addr == NULL)
+		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+	int i;
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		/* iowrite32(ioread32(srcP++), dstP++); */
+		*dstP++ = *srcP++;
+	return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+				unsigned long page,
+				vm_memattr_t prot)
+{
+	vm_page_t d = ttm->pages[page];
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+
+	/* XXXKIB can't sleep ? */
+	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy(dst, src, PAGE_SIZE);
+
+	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);
+
+	return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+				unsigned long page,
+				vm_memattr_t prot)
+{
+	vm_page_t s = ttm->pages[page];
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
+	if (!src)
+		return -ENOMEM;
+
+	memcpy(dst, src, PAGE_SIZE);
+
+	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);
+
+	return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+		       bool evict, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg old_copy = *old_mem;
+	void *old_iomap;
+	void *new_iomap;
+	int ret;
+	unsigned long i;
+	unsigned long page;
+	unsigned long add = 0;
+	int dir;
+
+	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+	if (ret)
+		return ret;
+	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+	if (ret)
+		goto out;
+
+	if (old_iomap == NULL && new_iomap == NULL)
+		goto out2;
+	if (old_iomap == NULL && ttm == NULL)
+		goto out2;
+
+	/* ttm may be NULL when moving between two iomem regions. */
+	if (ttm != NULL && ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret) {
+			/* If we fail here, don't nuke the mm node,
+			 * as the bo still owns it. */
+			old_copy.mm_node = NULL;
+			goto out1;
+		}
+	}
+
+	add = 0;
+	dir = 1;
+
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->start < old_mem->start + old_mem->size)) {
+		dir = -1;
+		add = new_mem->num_pages - 1;
+	}
+
+	for (i = 0; i < new_mem->num_pages; ++i) {
+		page = i * dir + add;
+		if (old_iomap == NULL) {
+			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
+			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+		if (ret) {
+			/* Failing here means keep the old copy as-is. */
+			old_copy.mm_node = NULL;
+			goto out1;
+		}
+	}
+	mb();
+out2:
+	old_copy = *old_mem;
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+		ttm_tt_unbind(ttm);
+		ttm_tt_destroy(ttm);
+		bo->ttm = NULL;
+	}
+
+out1:
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+out:
+	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_bo_mem_put(bo, &old_copy);
+	return ret;
+}
+
+MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+	free(bo, M_TTM_TRANSF_OBJ);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * !0: Failure.
+ */
+
+static int
+ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+    struct ttm_buffer_object **new_obj)
+{
+	struct ttm_buffer_object *fbo;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+
+	fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK);
+	*fbo = *bo;
+
+	/**
+	 * Fix up members that we shouldn't copy directly:
+	 * TODO: Explicit member copy would probably be better here.
+	 */
+
+	INIT_LIST_HEAD(&fbo->ddestroy);
+	INIT_LIST_HEAD(&fbo->lru);
+	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	fbo->vm_node = NULL;
+	atomic_set(&fbo->cpu_writers, 0);
+
+	mtx_lock(&bdev->fence_lock);
+	if (bo->sync_obj)
+		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	else
+		fbo->sync_obj = NULL;
+	mtx_unlock(&bdev->fence_lock);
+	refcount_init(&fbo->list_kref, 1);
+	refcount_init(&fbo->kref, 1);
+	fbo->destroy = &ttm_transfered_destroy;
+	fbo->acc_size = 0;
+
+	*new_obj = fbo;
+	return 0;
+}
+
+vm_memattr_t
+ttm_io_prot(uint32_t caching_flags)
+{
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
+	if (caching_flags & TTM_PL_FLAG_WC)
+		return (VM_MEMATTR_WRITE_COMBINING);
+	else
+		/*
+		 * We do not support i386; see the Linux source
+		 * for the rationale behind this case.
+		 */
+		return (VM_MEMATTR_UNCACHEABLE);
+#else
+#error Port me
+#endif
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+			  unsigned long offset,
+			  unsigned long size,
+			  struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (bo->mem.bus.addr) {
+		map->bo_kmap_type = ttm_bo_map_premapped;
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+	} else {
+		map->bo_kmap_type = ttm_bo_map_iomap;
+		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
+		    bo->mem.bus.offset + offset, size,
+		    (mem->placement & TTM_PL_FLAG_WC) ?
+		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+		map->size = size;
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+			   unsigned long start_page,
+			   unsigned long num_pages,
+			   struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	vm_memattr_t prot;
+	struct ttm_tt *ttm = bo->ttm;
+	int i, ret;
+
+	MPASS(ttm != NULL);
+
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			return ret;
+	}
+
+	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+
+		map->bo_kmap_type = ttm_bo_map_kmap;
+		map->page = ttm->pages[start_page];
+		map->sf = sf_buf_alloc(map->page, 0);
+		map->virtual = (void *)sf_buf_kva(map->sf);
+	} else {
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+			VM_MEMATTR_DEFAULT : ttm_io_prot(mem->placement);
+		map->bo_kmap_type = ttm_bo_map_vmap;
+		map->num_pages = num_pages;
+		map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
+		if (map->virtual != NULL) {
+			for (i = 0; i < num_pages; i++) {
+				/* XXXKIB hack */
+				pmap_page_set_memattr(ttm->pages[start_page +
+				    i], prot);
+			}
+			pmap_qenter((vm_offset_t)map->virtual,
+			    &ttm->pages[start_page], num_pages);
+		}
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+		unsigned long start_page, unsigned long num_pages,
+		struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+	unsigned long offset, size;
+	int ret;
+
+	MPASS(list_empty(&bo->swap));
+	map->virtual = NULL;
+	map->bo = bo;
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
+	if (ret)
+		return ret;
+	if (!bo->mem.bus.is_iomem) {
+		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
+	}
+}
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
+	if (!map->virtual)
+		return;
+	switch (map->bo_kmap_type) {
+	case ttm_bo_map_iomap:
+		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
+		break;
+	case ttm_bo_map_vmap:
+		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
+		kva_free((vm_offset_t)map->virtual,
+		    map->num_pages * PAGE_SIZE);
+		break;
+	case ttm_bo_map_kmap:
+		sf_buf_free(map->sf);
+		break;
+	case ttm_bo_map_premapped:
+		break;
+	default:
+		MPASS(0);
+	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
+	map->virtual = NULL;
+	map->page = NULL;
+	map->sf = NULL;
+}
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+			      void *sync_obj,
+			      bool evict,
+			      bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+	struct ttm_buffer_object *ghost_obj;
+	void *tmp_obj = NULL;
+
+	mtx_lock(&bdev->fence_lock);
+	if (bo->sync_obj) {
+		tmp_obj = bo->sync_obj;
+		bo->sync_obj = NULL;
+	}
+	bo->sync_obj = driver->sync_obj_ref(sync_obj);
+	if (evict) {
+		ret = ttm_bo_wait(bo, false, false, false);
+		mtx_unlock(&bdev->fence_lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
+		if (ret)
+			return ret;
+
+		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+		    (bo->ttm != NULL)) {
+			ttm_tt_unbind(bo->ttm);
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
+		}
+		ttm_bo_free_old_node(bo);
+	} else {
+		/**
+		 * This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+		 */
+
+		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+		mtx_unlock(&bdev->fence_lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
+
+		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+		if (ret)
+			return ret;
+
+		/**
+		 * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+		 * bo to be unbound and destroyed.
+		 */
+
+		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+			ghost_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+
+		ttm_bo_unreserve(ghost_obj);
+		ttm_bo_unref(&ghost_obj);
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo_util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
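
The ttm_bo_kmap()/ttm_bo_kunmap() pair above is the CPU-access path for
buffer objects. A minimal usage sketch, assuming the bo is already
reserved and idle (the helper name example_clear_first_page is
hypothetical, not part of this commit):

static int
example_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *va;
	int ret;

	/* Map page 0 of the (reserved) buffer object into kernel VA. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret != 0)
		return (ret);
	va = ttm_kmap_obj_virtual(&map, &is_iomem);
	memset(va, 0, PAGE_SIZE);	/* plain CPU write via the mapping */
	ttm_bo_kunmap(&map);		/* releases the sf_buf, KVA or iomap */
	return (0);
}
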
Added: trunk/sys/dev/drm2/ttm/ttm_bo_vm.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_bo_vm.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_bo_vm.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,567 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib at FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_bo_vm.c 307672 2016-10-20 13:12:19Z kib $");
+
+#include "opt_vm.h"
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+    ttm_bo_cmp_rb_tree_items);
+
+int
+ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+    struct ttm_buffer_object *b)
+{
+
+	if (a->vm_node->start < b->vm_node->start) {
+		return (-1);
+	} else if (a->vm_node->start > b->vm_node->start) {
+		return (1);
+	} else {
+		return (0);
+	}
+}
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+						     unsigned long page_start,
+						     unsigned long num_pages)
+{
+	unsigned long cur_offset;
+	struct ttm_buffer_object *bo;
+	struct ttm_buffer_object *best_bo = NULL;
+
+	bo = RB_ROOT(&bdev->addr_space_rb);
+	while (bo != NULL) {
+		cur_offset = bo->vm_node->start;
+		if (page_start >= cur_offset) {
+			best_bo = bo;
+			if (page_start == cur_offset)
+				break;
+			bo = RB_RIGHT(bo, vm_rb);
+		} else
+			bo = RB_LEFT(bo, vm_rb);
+	}
+
+	if (unlikely(best_bo == NULL))
+		return NULL;
+
+	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+		     (page_start + num_pages)))
+		return NULL;
+
+	return best_bo;
+}
+
+static int
+ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
+    int prot, vm_page_t *mres)
+{
+
+	struct ttm_buffer_object *bo = vm_obj->handle;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_tt *ttm = NULL;
+	vm_page_t m, m1, oldm;
+	int ret;
+	int retval = VM_PAGER_OK;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
+
+	vm_object_pip_add(vm_obj, 1);
+	oldm = *mres;
+	if (oldm != NULL) {
+		vm_page_lock(oldm);
+		vm_page_remove(oldm);
+		vm_page_unlock(oldm);
+		*mres = NULL;
+	} else
+		oldm = NULL;
+retry:
+	VM_OBJECT_WUNLOCK(vm_obj);
+	m = NULL;
+
+reserve:
+	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	if (unlikely(ret != 0)) {
+		if (ret == -EBUSY) {
+			kern_yield(0);
+			goto reserve;
+		}
+	}
+
+	if (bdev->driver->fault_reserve_notify) {
+		ret = bdev->driver->fault_reserve_notify(bo);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+		case -ERESTARTSYS:
+		case -EINTR:
+			kern_yield(0);
+			goto reserve;
+		default:
+			retval = VM_PAGER_ERROR;
+			goto out_unlock;
+		}
+	}
+
+	/*
+	 * Wait for buffer data in transit, due to a pipelined
+	 * move.
+	 */
+
+	mtx_lock(&bdev->fence_lock);
+	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+		/*
+		 * Here, the behavior differs between Linux and FreeBSD.
+		 *
+		 * On Linux, the wait is interruptible (3rd argument to
+		 * ttm_bo_wait). There must be some mechanism to resume
+		 * page fault handling, once the signal is processed.
+		 *
+		 * On FreeBSD, the wait is uninterruptible. This is not a
+		 * problem as we can't end up with an unkillable process
+		 * here, because the wait will eventually time out.
+		 *
+		 * An example of this situation is the Xorg process
+		 * which uses SIGALRM internally. The signal could
+		 * interrupt the wait, causing the page fault to fail
+		 * and the process to receive SIGSEGV.
+		 */
+		ret = ttm_bo_wait(bo, false, false, false);
+		mtx_unlock(&bdev->fence_lock);
+		if (unlikely(ret != 0)) {
+			retval = VM_PAGER_ERROR;
+			goto out_unlock;
+		}
+	} else
+		mtx_unlock(&bdev->fence_lock);
+
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_PAGER_ERROR;
+		goto out_unlock;
+	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_PAGER_ERROR;
+		goto out_io_unlock;
+	}
+
+	/*
+	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
+	 * since the mmap_sem is only held in read mode. However, we
+	 * modify only the caching bits of vma->vm_page_prot and
+	 * consider those bits protected by
+	 * the bo->mutex, as we should be the only writers.
+	 * There shouldn't really be any readers of these bits except
+	 * within vm_insert_mixed()? fork?
+	 *
+	 * TODO: Add a list of vmas to the bo, and change the
+	 * vma->vm_page_prot when the object changes caching policy, with
+	 * the correct locks held.
+	 */
+	if (!bo->mem.bus.is_iomem) {
+		/* Allocate all pages at once, the most common usage */
+		ttm = bo->ttm;
+		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+			retval = VM_PAGER_ERROR;
+			goto out_io_unlock;
+		}
+	}
+
+	if (bo->mem.bus.is_iomem) {
+		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
+		    offset);
+		KASSERT((m->flags & PG_FICTITIOUS) != 0,
+		    ("physical address %#jx not fictitious",
+		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
+		    + offset)));
+		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
+	} else {
+		ttm = bo->ttm;
+		m = ttm->pages[OFF_TO_IDX(offset)];
+		if (unlikely(!m)) {
+			retval = VM_PAGER_ERROR;
+			goto out_io_unlock;
+		}
+		pmap_page_set_memattr(m,
+		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
+	}
+
+	VM_OBJECT_WLOCK(vm_obj);
+	if (vm_page_busied(m)) {
+		vm_page_lock(m);
+		VM_OBJECT_WUNLOCK(vm_obj);
+		vm_page_busy_sleep(m, "ttmpbs", false);
+		VM_OBJECT_WLOCK(vm_obj);
+		ttm_mem_io_unlock(man);
+		ttm_bo_unreserve(bo);
+		goto retry;
+	}
+	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
+	if (m1 == NULL) {
+		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
+			VM_OBJECT_WUNLOCK(vm_obj);
+			VM_WAIT;
+			VM_OBJECT_WLOCK(vm_obj);
+			ttm_mem_io_unlock(man);
+			ttm_bo_unreserve(bo);
+			goto retry;
+		}
+	} else {
+		KASSERT(m == m1,
+		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
+		    bo, m, m1, (uintmax_t)offset));
+	}
+	m->valid = VM_PAGE_BITS_ALL;
+	*mres = m;
+	vm_page_xbusy(m);
+
+	if (oldm != NULL) {
+		vm_page_lock(oldm);
+		vm_page_free(oldm);
+		vm_page_unlock(oldm);
+	}
+
+out_io_unlock1:
+	ttm_mem_io_unlock(man);
+out_unlock1:
+	ttm_bo_unreserve(bo);
+	vm_object_pip_wakeup(vm_obj);
+	return (retval);
+
+out_io_unlock:
+	VM_OBJECT_WLOCK(vm_obj);
+	goto out_io_unlock1;
+
+out_unlock:
+	VM_OBJECT_WLOCK(vm_obj);
+	goto out_unlock1;
+}
+
+static int
+ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+    vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+
+	/*
+	 * On Linux, a reference to the buffer object is acquired here.
+	 * The reason is that this function is not called when the
+	 * mmap() is initialized, but only when a process forks for
+	 * instance. Therefore on Linux, the reference on the bo is
+	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
+	 * then released in ttm_bo_vm_close().
+	 *
+	 * Here, this function is called during mmap() initialization.
+	 * Thus, the reference acquired in ttm_bo_mmap_single() is
+	 * sufficient.
+	 */
+
+	*color = 0;
+	return (0);
+}
+
+static void
+ttm_bo_vm_dtor(void *handle)
+{
+	struct ttm_buffer_object *bo = handle;
+
+	ttm_bo_unref(&bo);
+}
+
+static struct cdev_pager_ops ttm_pager_ops = {
+	.cdev_pg_fault = ttm_bo_vm_fault,
+	.cdev_pg_ctor = ttm_bo_vm_ctor,
+	.cdev_pg_dtor = ttm_bo_vm_dtor
+};
+
+int
+ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
+    struct vm_object **obj_res, int nprot)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	struct vm_object *vm_obj;
+	int ret;
+
+	rw_wlock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
+	if (likely(bo != NULL))
+		refcount_acquire(&bo->kref);
+	rw_wunlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL)) {
+		printf("[TTM] Could not find buffer object to map\n");
+		return (-EINVAL);
+	}
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = driver->verify_access(bo);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
+	    size, nprot, 0, curthread->td_ucred);
+	if (vm_obj == NULL) {
+		ret = -EINVAL;
+		goto out_unref;
+	}
+	/*
+	 * Note: We're transferring the bo reference to vm_obj->handle here.
+	 */
+	*offset = 0;
+	*obj_res = vm_obj;
+	return 0;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+void
+ttm_bo_release_mmap(struct ttm_buffer_object *bo)
+{
+	vm_object_t vm_obj;
+	vm_page_t m;
+	int i;
+
+	vm_obj = cdev_pager_lookup(bo);
+	if (vm_obj == NULL)
+		return;
+
+	VM_OBJECT_WLOCK(vm_obj);
+retry:
+	for (i = 0; i < bo->num_pages; i++) {
+		m = vm_page_lookup(vm_obj, i);
+		if (m == NULL)
+			continue;
+		if (vm_page_sleep_if_busy(m, "ttm_unm"))
+			goto retry;
+		cdev_pager_free_page(vm_obj, m);
+	}
+	VM_OBJECT_WUNLOCK(vm_obj);
+
+	vm_object_deallocate(vm_obj);
+}
+
+#if 0
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+	if (vma->vm_pgoff != 0)
+		return -EACCES;
+
+	vma->vm_ops = &ttm_bo_vm_ops;
+	vma->vm_private_data = ttm_bo_reference(bo);
+	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+	return 0;
+}
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+		  const char __user *wbuf, char __user *rbuf, size_t count,
+		  loff_t *f_pos, bool write)
+{
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_driver *driver;
+	struct ttm_bo_kmap_obj map;
+	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	read_lock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+	if (likely(bo != NULL))
+		ttm_bo_reference(bo);
+	read_unlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL))
+		return -EFAULT;
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	kmap_offset = dev_offset - bo->vm_node->start;
+	if (unlikely(kmap_offset >= bo->num_pages)) {
+		ret = -EFBIG;
+		goto out_unref;
+	}
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -EBUSY:
+		ret = -EAGAIN;
+		goto out_unref;
+	default:
+		goto out_unref;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(bo);
+		goto out_unref;
+	}
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	virtual += page_offset;
+
+	if (write)
+		ret = copy_from_user(virtual, wbuf, io_size);
+	else
+		ret = copy_to_user(rbuf, virtual, io_size);
+
+	ttm_bo_kunmap(&map);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	if (unlikely(ret != 0))
+		return -EFBIG;
+
+	*f_pos += io_size;
+
+	return io_size;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+			char __user *rbuf, size_t count, loff_t *f_pos,
+			bool write)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	kmap_offset = (*f_pos >> PAGE_SHIFT);
+	if (unlikely(kmap_offset >= bo->num_pages))
+		return -EFBIG;
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -EBUSY:
+		return -EAGAIN;
+	default:
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(bo);
+		return ret;
+	}
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	virtual += page_offset;
+
+	if (write)
+		ret = copy_from_user(virtual, wbuf, io_size);
+	else
+		ret = copy_to_user(rbuf, virtual, io_size);
+
+	ttm_bo_kunmap(&map);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	*f_pos += io_size;
+
+	return io_size;
+}
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_bo_vm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
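
ttm_bo_mmap_single() above is the FreeBSD analogue of Linux's
ttm_bo_mmap(): it resolves a byte offset to a buffer object and hands
back a managed-device VM object. A hedged sketch of the cdev glue a
driver might provide (the mydrv_* names and softc layout are
illustrative, not part of this commit):

static int
mydrv_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct mydrv_softc *sc = kdev->si_drv1;	/* hypothetical softc */

	/* ttm_bo_mmap_single() returns a negated errno, while
	 * d_mmap_single callbacks return positive errno values. */
	return (-ttm_bo_mmap_single(&sc->bdev, offset, size, obj_res, nprot));
}
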
Added: trunk/sys/dev/drm2/ttm/ttm_execbuf_util.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_execbuf_util.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_execbuf_util.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,239 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_execbuf_util.c 254864 2013-08-25 14:52:20Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_execbuf_util.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+
+		}
+		entry->reserved = false;
+		atomic_set(&bo->reserved, 0);
+		wakeup(bo);
+	}
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_bo_global *glob;
+
+	if (list_empty(list))
+		return;
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+	mtx_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	mtx_unlock(&glob->lru_lock);
+}
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list)
+{
+	struct ttm_bo_global *glob;
+	struct ttm_validate_buffer *entry;
+	int ret;
+	uint32_t val_seq;
+
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+
+	mtx_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
+retry_locked:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ttm_eu_del_from_lru_locked(list);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			if (unlikely(ret != 0)) {
+				mtx_unlock(&glob->lru_lock);
+				return ret;
+			}
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
+			goto retry_locked;
+		default:
+			goto err;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ret = -EBUSY;
+			goto err;
+		}
+	}
+
+	ttm_eu_del_from_lru_locked(list);
+	mtx_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
+	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	mtx_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
+}
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_bo_driver *driver;
+
+	if (list_empty(list))
+		return;
+
+	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	driver = bdev->driver;
+	glob = bo->glob;
+
+	mtx_lock(&glob->lru_lock);
+	mtx_lock(&bdev->fence_lock);
+
+	list_for_each_entry(entry, list, head) {
+		bo = entry->bo;
+		entry->old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		ttm_bo_unreserve_locked(bo);
+		entry->reserved = false;
+	}
+	mtx_unlock(&bdev->fence_lock);
+	mtx_unlock(&glob->lru_lock);
+
+	list_for_each_entry(entry, list, head) {
+		if (entry->old_sync_obj)
+			driver->sync_obj_unref(&entry->old_sync_obj);
+	}
+}


Property changes on: trunk/sys/dev/drm2/ttm/ttm_execbuf_util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
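
The reservation protocol documented above pairs ttm_eu_reserve_buffers()
with either ttm_eu_fence_buffer_objects() or
ttm_eu_backoff_reservation(). A minimal sketch for a single buffer
(example_submit and the opaque fence argument are illustrative):

static int
example_submit(struct ttm_buffer_object *bo, void *fence)
{
	struct ttm_validate_buffer entry;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	entry.bo = bo;
	list_add_tail(&entry.head, &list);

	ret = ttm_eu_reserve_buffers(&list);	/* reserve, take off LRU */
	if (ret != 0)
		return (ret);

	/* ... validate placements and submit the command stream; on a
	 * submission error, call ttm_eu_backoff_reservation(&list)
	 * instead of fencing ... */

	ttm_eu_fence_buffer_objects(&list, fence);
	return (0);
}
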
Added: trunk/sys/dev/drm2/ttm/ttm_execbuf_util.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_execbuf_util.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_execbuf_util.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,110 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_execbuf_util.h 247835 2013-03-05 09:49:34Z kib $ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include <dev/drm2/ttm/ttm_bo_api.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head:           list head for thread-private list.
+ * @bo:             refcounted buffer object pointer.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ * @removed:        Indicates whether @bo has been removed from lru lists.
+ * @put_count:      Number of outstanding references on bo::list_kref.
+ * @old_sync_obj:   Pointer to a sync object about to be unreferenced
+ */
+
+struct ttm_validate_buffer {
+	struct list_head head;
+	struct ttm_buffer_object *bo;
+	bool reserved;
+	bool removed;
+	int put_count;
+	void *old_sync_obj;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list:     thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list:    thread private list of ttm_validate_buffer structs.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list:        thread private list of ttm_validate_buffer structs.
+ * @sync_obj:    The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_execbuf_util.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_lock.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_lock.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_lock.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,349 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib at FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_lock.c 282199 2015-04-28 19:35:05Z dumbbell $");
+
+#include <dev/drm2/ttm/ttm_lock.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+#define TTM_WRITE_LOCK_PENDING    (1 << 0)
+#define TTM_VT_LOCK_PENDING       (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
+#define TTM_VT_LOCK               (1 << 3)
+#define TTM_SUSPEND_LOCK          (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+	mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
+	lock->rw = 0;
+	lock->flags = 0;
+	lock->kill_takers = false;
+	lock->signal = SIGKILL;
+}
+
+static void
+ttm_lock_send_sig(int signo)
+{
+	struct proc *p;
+
+	p = curproc;	/* XXXKIB curthread ? */
+	PROC_LOCK(p);
+	kern_psignal(p, signo);
+	PROC_UNLOCK(p);
+}
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+	mtx_lock(&lock->lock);
+	if (--lock->rw == 0)
+		wakeup(lock);
+	mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	if (unlikely(lock->kill_takers)) {
+		ttm_lock_send_sig(lock->signal);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		locked = true;
+	}
+	return locked;
+}
+
+int
+ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+	const char *wmsg;
+	int flags, ret;
+
+	ret = 0;
+	if (interruptible) {
+		flags = PCATCH;
+		wmsg = "ttmri";
+	} else {
+		flags = 0;
+		wmsg = "ttmr";
+	}
+	mtx_lock(&lock->lock);
+	while (!__ttm_read_lock(lock)) {
+		ret = -msleep(lock, &lock->lock, flags, wmsg, 0);
+		if (ret == -EINTR || ret == -ERESTART)
+			ret = -ERESTARTSYS;
+		if (ret != 0)
+			break;
+	}
+	/* msleep() reacquires the mutex, so drop it before returning. */
+	mtx_unlock(&lock->lock);
+	return (ret);
+}
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+	bool block = true;
+
+	*locked = false;
+
+	if (unlikely(lock->kill_takers)) {
+		ttm_lock_send_sig(lock->signal);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		block = false;
+		*locked = true;
+	} else if (lock->flags == 0) {
+		block = false;
+	}
+
+	return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+	const char *wmsg;
+	int flags, ret;
+	bool locked;
+
+	ret = 0;
+	if (interruptible) {
+		flags = PCATCH;
+		wmsg = "ttmrti";
+	} else {
+		flags = 0;
+		wmsg = "ttmrt";
+	}
+	mtx_lock(&lock->lock);
+	while (!__ttm_read_trylock(lock, &locked)) {
+		ret = -msleep(lock, &lock->lock, flags, wmsg, 0);
+		if (ret == -EINTR || ret == -ERESTART)
+			ret = -ERESTARTSYS;
+		if (ret != 0)
+			break;
+	}
+	MPASS(!locked || ret == 0);
+	mtx_unlock(&lock->lock);
+
+	return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+	mtx_lock(&lock->lock);
+	lock->rw = 0;
+	wakeup(lock);
+	mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	if (unlikely(lock->kill_takers)) {
+		ttm_lock_send_sig(lock->signal);
+		return false;
+	}
+	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+		lock->rw = -1;
+		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+		locked = true;
+	} else {
+		lock->flags |= TTM_WRITE_LOCK_PENDING;
+	}
+	return locked;
+}
+
+int
+ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+	const char *wmsg;
+	int flags, ret;
+
+	ret = 0;
+	if (interruptible) {
+		flags = PCATCH;
+		wmsg = "ttmwi";
+	} else {
+		flags = 0;
+		wmsg = "ttmw";
+	}
+	mtx_lock(&lock->lock);
+	/* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */
+	while (!__ttm_write_lock(lock)) {
+		ret = -msleep(lock, &lock->lock, flags, wmsg, 0);
+		if (ret == -EINTR || ret == -ERESTART)
+			ret = -ERESTARTSYS;
+		if (interruptible && ret != 0) {
+			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+			wakeup(lock);
+			break;
+		}
+	}
+	mtx_unlock(&lock->lock);
+
+	return (ret);
+}
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+	mtx_lock(&lock->lock);
+	lock->rw = 1;
+	wakeup(lock);
+	mtx_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+	int ret = 0;
+
+	mtx_lock(&lock->lock);
+	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+		ret = -EINVAL;
+	lock->flags &= ~TTM_VT_LOCK;
+	wakeup(lock);
+	mtx_unlock(&lock->lock);
+
+	return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+	int ret;
+
+	*p_base = NULL;
+	ret = __ttm_vt_unlock(lock);
+	MPASS(ret == 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_VT_LOCK_PENDING;
+		lock->flags |= TTM_VT_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_VT_LOCK_PENDING;
+	}
+	return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+		bool interruptible,
+		struct ttm_object_file *tfile)
+{
+	const char *wmsg;
+	int flags, ret;
+
+	ret = 0;
+	if (interruptible) {
+		flags = PCATCH;
+		wmsg = "ttmwi";
+	} else {
+		flags = 0;
+		wmsg = "ttmw";
+	}
+	mtx_lock(&lock->lock);
+	while (!__ttm_vt_lock(lock)) {
+		ret = -msleep(lock, &lock->lock, flags, wmsg, 0);
+		if (ret == -EINTR || ret == -ERESTART)
+			ret = -ERESTARTSYS;
+		if (interruptible && ret != 0) {
+			lock->flags &= ~TTM_VT_LOCK_PENDING;
+			wakeup(lock);
+			break;
+		}
+	}
+	/* Drop the mutex before the sleepable base-object setup below;
+	 * bail out if the wait was interrupted. */
+	mtx_unlock(&lock->lock);
+	if (ret != 0)
+		return (ret);
+
+	/*
+	 * Add a base-object, the destructor of which will
+	 * make sure the lock is released if the client dies
+	 * while holding it.
+	 */
+
+	ret = ttm_base_object_init(tfile, &lock->base, false,
+				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
+	if (ret)
+		(void)__ttm_vt_unlock(lock);
+	else
+		lock->vt_holder = tfile;
+
+	return (ret);
+}
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+	return ttm_ref_object_base_unref(lock->vt_holder,
+					 lock->base.hash.key, TTM_REF_USAGE);
+}
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+	mtx_lock(&lock->lock);
+	lock->flags &= ~TTM_SUSPEND_LOCK;
+	wakeup(lock);
+	mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+		lock->flags |= TTM_SUSPEND_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+	}
+	return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+	mtx_lock(&lock->lock);
+	while (!__ttm_suspend_lock(lock))
+		msleep(lock, &lock->lock, 0, "ttms", 0);
+	mtx_unlock(&lock->lock);
+}


Property changes on: trunk/sys/dev/drm2/ttm/ttm_lock.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
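
A short usage sketch for the read side of the lock above, assuming an
interruptible caller (the function name is illustrative):

static int
example_validate_section(struct ttm_lock *lock)
{
	int ret;

	ret = ttm_read_lock(lock, true);	/* interruptible sleep */
	if (unlikely(ret != 0))
		return (ret);			/* -ERESTARTSYS on signal */

	/* ... validate buffers while user-space access is permitted ... */

	ttm_read_unlock(lock);
	return (0);
}
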
Added: trunk/sys/dev/drm2/ttm/ttm_lock.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_lock.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_lock.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,229 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_lock.h 247848 2013-03-05 16:14:55Z kib $ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while holding the vt lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/ttm/ttm_object.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+	struct ttm_base_object base;
+	struct mtx lock;
+	int32_t rw;
+	uint32_t flags;
+	bool kill_takers;
+	int signal;
+	struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_write_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+		       struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+				     int signal)
+{
+	lock->kill_takers = val;
+	if (val)
+		lock->signal = signal;
+}
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_lock.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
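
A note on the ttm_lock.h API above: the kill-when-taking-lock sequence in the
ttm_lock_set_kill() comment is easier to see in code. A minimal sketch,
assuming a hypothetical drv_softc with an embedded struct ttm_lock and a
hypothetical drv_release_ttm_resources() teardown helper (none of this is
part of the commit):

	static int
	drv_master_exit(struct drv_softc *sc)
	{
		int ret;

		/* Take the lock in write mode, then arm the kill behaviour. */
		ret = ttm_write_lock(&sc->ttm_lock, true);
		if (ret != 0)
			return (ret);		/* -ERESTARTSYS on a signal */
		ttm_lock_set_kill(&sc->ttm_lock, true, SIGKILL);

		/* Tear down TTM resources while the lock is held... */
		drv_release_ttm_resources(sc);

		/* ...and leave kill armed: the next client that takes the
		 * lock is sent SIGKILL, as the comment above describes. */
		ttm_write_unlock(&sc->ttm_lock);
		return (0);
	}
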
Added: trunk/sys/dev/drm2/ttm/ttm_memory.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_memory.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_memory.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,470 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_memory.c 248663 2013-03-23 20:46:47Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+struct ttm_mem_zone {
+	u_int kobj_ref;
+	struct ttm_mem_global *glob;
+	const char *name;
+	uint64_t zone_mem;
+	uint64_t emer_mem;
+	uint64_t max_mem;
+	uint64_t swap_limit;
+	uint64_t used_mem;
+};
+
+MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");
+
+static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
+{
+
+	printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
+		zone->name, (unsigned long long)zone->used_mem >> 10);
+	free(zone, M_TTM_ZONE);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
+				 struct attribute *attr,
+				 char *buffer)
+{
+	uint64_t val = 0;
+
+	mtx_lock(&zone->glob->lock);
+	if (attr == &ttm_mem_sys)
+		val = zone->zone_mem;
+	else if (attr == &ttm_mem_emer)
+		val = zone->emer_mem;
+	else if (attr == &ttm_mem_max)
+		val = zone->max_mem;
+	else if (attr == &ttm_mem_swap)
+		val = zone->swap_limit;
+	else if (attr == &ttm_mem_used)
+		val = zone->used_mem;
+	mtx_unlock(&zone->glob->lock);
+
+	return snprintf(buffer, PAGE_SIZE, "%llu\n",
+			(unsigned long long) val >> 10);
+}
+#endif
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
+				  struct attribute *attr,
+				  const char *buffer,
+				  size_t size)
+{
+	int chars;
+	unsigned long val;
+	uint64_t val64;
+
+	chars = sscanf(buffer, "%lu", &val);
+	if (chars == 0)
+		return size;
+
+	val64 = val;
+	val64 <<= 10;
+
+	mtx_lock(&zone->glob->lock);
+	if (val64 > zone->zone_mem)
+		val64 = zone->zone_mem;
+	if (attr == &ttm_mem_emer) {
+		zone->emer_mem = val64;
+		if (zone->max_mem > val64)
+			zone->max_mem = val64;
+	} else if (attr == &ttm_mem_max) {
+		zone->max_mem = val64;
+		if (zone->emer_mem < val64)
+			zone->emer_mem = val64;
+	} else if (attr == &ttm_mem_swap)
+		zone->swap_limit = val64;
+	mtx_unlock(&zone->glob->lock);
+
+	ttm_check_swapping(zone->glob);
+
+	return size;
+}
+#endif
+
+static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
+{
+}
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+					bool from_wq, uint64_t extra)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+	uint64_t target;
+
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+
+		if (from_wq)
+			target = zone->swap_limit;
+		else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
+			target = zone->emer_mem;
+		else
+			target = zone->max_mem;
+
+		target = (extra > target) ? 0ULL : target;
+
+		if (zone->used_mem > target)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
+		       uint64_t extra)
+{
+	int ret;
+	struct ttm_mem_shrink *shrink;
+
+	mtx_lock(&glob->lock);
+	if (glob->shrink == NULL)
+		goto out;
+
+	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
+		shrink = glob->shrink;
+		mtx_unlock(&glob->lock);
+		ret = shrink->do_shrink(shrink);
+		mtx_lock(&glob->lock);
+		if (unlikely(ret != 0))
+			goto out;
+	}
+out:
+	mtx_unlock(&glob->lock);
+}
+
+
+
+static void ttm_shrink_work(void *arg, int pending __unused)
+{
+	struct ttm_mem_global *glob = arg;
+
+	ttm_shrink(glob, true, 0ULL);
+}
+
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+    uint64_t mem)
+{
+	struct ttm_mem_zone *zone;
+
+	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+	zone->name = "kernel";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_kernel = zone;
+	refcount_init(&zone->kobj_ref, 1);
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+    uint64_t mem)
+{
+	struct ttm_mem_zone *zone;
+
+	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+	/**
+	 * No special dma32 zone needed.
+	 */
+
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		free(zone, M_TTM_ZONE);
+		return 0;
+	}
+
+	/*
+	 * Limit max dma32 memory to 4GB for now
+	 * until we can figure out how big this
+	 * zone really is.
+	 */
+
+	mem = ((uint64_t) 1ULL << 32);
+	zone->name = "dma32";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_dma32 = zone;
+	refcount_init(&zone->kobj_ref, 1);
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+	uint64_t mem;
+	int ret;
+	int i;
+	struct ttm_mem_zone *zone;
+
+	mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
+	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
+	    taskqueue_thread_enqueue, &glob->swap_queue);
+	taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
+	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
+
+	refcount_init(&glob->kobj_ref, 1);
+
+	mem = physmem * PAGE_SIZE;
+
+	ret = ttm_mem_init_kernel_zone(glob, mem);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+	ret = ttm_mem_init_dma32_zone(glob, mem);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
+			zone->name, (unsigned long long)zone->max_mem >> 10);
+	}
+	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+	return 0;
+out_no_zone:
+	ttm_mem_global_release(glob);
+	return ret;
+}
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+	ttm_dma_page_alloc_fini();
+
+	taskqueue_drain(glob->swap_queue, &glob->work);
+	taskqueue_free(glob->swap_queue);
+	glob->swap_queue = NULL;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (refcount_release(&zone->kobj_ref))
+			ttm_mem_zone_kobj_release(zone);
+	}
+	if (refcount_release(&glob->kobj_ref))
+		ttm_mem_global_kobj_release(glob);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+	bool needs_swapping = false;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	mtx_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (zone->used_mem > zone->swap_limit) {
+			needs_swapping = true;
+			break;
+		}
+	}
+
+	mtx_unlock(&glob->lock);
+
+	if (unlikely(needs_swapping))
+		taskqueue_enqueue(glob->swap_queue, &glob->work);
+
+}
+
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t amount)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	mtx_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+		zone->used_mem -= amount;
+	}
+	mtx_unlock(&glob->lock);
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+			 uint64_t amount)
+{
+	return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+				  struct ttm_mem_zone *single_zone,
+				  uint64_t amount, bool reserve)
+{
+	uint64_t limit;
+	int ret = -ENOMEM;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	mtx_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+
+		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
+			zone->emer_mem : zone->max_mem;
+
+		if (zone->used_mem > limit)
+			goto out_unlock;
+	}
+
+	if (reserve) {
+		for (i = 0; i < glob->num_zones; ++i) {
+			zone = glob->zones[i];
+			if (single_zone && zone != single_zone)
+				continue;
+			zone->used_mem += amount;
+		}
+	}
+
+	ret = 0;
+out_unlock:
+	mtx_unlock(&glob->lock);
+	ttm_check_swapping(glob);
+
+	return ret;
+}
+
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t memory,
+				     bool no_wait, bool interruptible)
+{
+	int count = TTM_MEMORY_ALLOC_RETRIES;
+
+	while (unlikely(ttm_mem_global_reserve(glob,
+					       single_zone,
+					       memory, true)
+			!= 0)) {
+		if (no_wait)
+			return -ENOMEM;
+		if (unlikely(count-- == 0))
+			return -ENOMEM;
+		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+	}
+
+	return 0;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 bool no_wait, bool interruptible)
+{
+	/**
+	 * Normal allocations of kernel memory are registered in
+	 * all zones.
+	 */
+
+	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+					 interruptible);
+}
+
+#define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct vm_page *page,
+			      bool no_wait, bool interruptible)
+{
+
+	struct ttm_mem_zone *zone = NULL;
+
+	/**
+	 * Page allocations may be registered in a single zone
+	 * only if highmem or !dma32.
+	 */
+
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					 interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
+size_t ttm_round_pot(size_t size)
+{
+	if ((size & (size - 1)) == 0)
+		return size;
+	else if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+	else {
+		size_t tmp_size = 4;
+
+		while (tmp_size < size)
+			tmp_size <<= 1;
+
+		return tmp_size;
+	}
+	return 0;
+}
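
Worked example for ttm_round_pot(): ttm_round_pot(24) returns 32 (the next
power of two), ttm_round_pot(4096) returns 4096 unchanged (already a power of
two), and sizes above PAGE_SIZE are page-aligned instead, so ttm_round_pot(5000)
returns 8192 with 4 KiB pages.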


Property changes on: trunk/sys/dev/drm2/ttm/ttm_memory.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
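
The accounting API in ttm_memory.c is symmetric: every successful
ttm_mem_global_alloc() must eventually be matched by a ttm_mem_global_free()
of the same size. A hedged sketch of accounting a driver-private object (the
drv_obj struct and M_DRV_OBJ malloc type are illustrative, not part of this
commit); ttm_ref_object_add() in ttm_object.c below follows the same pattern:

	static int
	drv_obj_create(struct ttm_mem_global *glob, struct drv_obj **pobj)
	{
		struct drv_obj *obj;
		int ret;

		/* Reserve accounting space first; this may invoke the shrinker. */
		ret = ttm_mem_global_alloc(glob, sizeof(*obj), false, false);
		if (ret != 0)
			return (ret);		/* -ENOMEM after the retry loop */
		obj = malloc(sizeof(*obj), M_DRV_OBJ, M_WAITOK | M_ZERO);
		*pobj = obj;
		return (0);
	}

	static void
	drv_obj_destroy(struct ttm_mem_global *glob, struct drv_obj *obj)
	{
		/* Free the memory before un-accounting it. */
		free(obj, M_DRV_OBJ);
		ttm_mem_global_free(glob, sizeof(*obj));
	}
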
Added: trunk/sys/dev/drm2/ttm/ttm_memory.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_memory.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_memory.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,150 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_memory.h 247835 2013-03-05 09:49:34Z kib $ */
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink functions are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of().
+ */
+
+struct ttm_mem_shrink {
+	int (*do_shrink) (struct ttm_mem_shrink *);
+};
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
+ * need a separate workqueue since it will spend a lot of time waiting
+ * for the GPU, and this would otherwise block other workqueue tasks.
+ * At this point we use only a single-threaded workqueue.
+ * @work: The workqueue callback for the shrink queue.
+ * @lock: Lock to protect the @shrink - and the memory accounting members,
+ * that is, essentially the whole structure with some exceptions.
+ * @zones: Array of pointers to accounting zones.
+ * @num_zones: Number of populated entries in the @zones array.
+ * @zone_kernel: Pointer to the kernel zone.
+ * @zone_highmem: Pointer to the highmem zone if there is one.
+ * @zone_dma32: Pointer to the dma32 zone if there is one.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+#define TTM_MEM_MAX_ZONES 2
+struct ttm_mem_zone;
+struct ttm_mem_global {
+	u_int kobj_ref;
+	struct ttm_mem_shrink *shrink;
+	struct taskqueue *swap_queue;
+	struct task work;
+	struct mtx lock;
+	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
+	unsigned int num_zones;
+	struct ttm_mem_zone *zone_kernel;
+	struct ttm_mem_zone *zone_dma32;
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+				       int (*func) (struct ttm_mem_shrink *))
+{
+	shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+					  struct ttm_mem_shrink *shrink)
+{
+	mtx_lock(&glob->lock);
+	if (glob->shrink != NULL) {
+		mtx_unlock(&glob->lock);
+		return -EBUSY;
+	}
+	glob->shrink = shrink;
+	mtx_unlock(&glob->lock);
+	return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+					     struct ttm_mem_shrink *shrink)
+{
+	mtx_lock(&glob->lock);
+	MPASS(glob->shrink == shrink);
+	glob->shrink = NULL;
+	mtx_unlock(&glob->lock);
+}
+
+struct vm_page;
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+				bool no_wait, bool interruptible);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+				uint64_t amount);
+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+				     struct vm_page *page,
+				     bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+				     struct vm_page *page);
+extern size_t ttm_round_pot(size_t size);
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_memory.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
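
As the struct ttm_mem_shrink comment in ttm_memory.h says, the do_shrink
argument is passed by inheritance and recovered with container_of(). A sketch
under that assumption (drv_shrink, drv_bo_global and drv_swapout_one are
illustrative names, not part of this commit):

	struct drv_shrink {
		struct ttm_mem_shrink base;	/* must be embedded */
		struct drv_bo_global *bo_glob;
	};

	static int
	drv_do_shrink(struct ttm_mem_shrink *shrink)
	{
		struct drv_shrink *ds =
		    container_of(shrink, struct drv_shrink, base);

		/* Swap out one buffer; return 0 on success. */
		return (drv_swapout_one(ds->bo_glob));
	}

	...
	ttm_mem_init_shrink(&ds->base, drv_do_shrink);
	ret = ttm_mem_register_shrink(glob, &ds->base);	/* -EBUSY if taken */
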
Added: trunk/sys/dev/drm2/ttm/ttm_module.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_module.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_module.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,38 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_module.h 247835 2013-03-05 09:49:34Z kib $ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#define TTM_PFX "[TTM] "
+
+#endif /* _TTM_MODULE_H_ */


Property changes on: trunk/sys/dev/drm2/ttm/ttm_module.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_object.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_object.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_object.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,456 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_object.c 247835 2013-03-05 09:49:34Z kib $");
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <sys/rwlock.h>
+#include <dev/drm2/ttm/ttm_object.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+struct ttm_object_file {
+	struct ttm_object_device *tdev;
+	struct rwlock lock;
+	struct list_head ref_list;
+	struct drm_open_hash ref_hash[TTM_REF_NUM];
+	u_int refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+	struct rwlock object_lock;
+	struct drm_open_hash object_hash;
+	atomic_t object_count;
+	struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+	struct drm_hash_item hash;
+	struct list_head head;
+	u_int kref;
+	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
+	struct ttm_object_file *tfile;
+};
+
+MALLOC_DEFINE(M_TTM_OBJ_FILE, "ttm_obj_file", "TTM File Objects");
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+	refcount_acquire(&tfile->refcount);
+	return tfile;
+}
+
+static void ttm_object_file_destroy(struct ttm_object_file *tfile)
+{
+
+	free(tfile, M_TTM_OBJ_FILE);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	if (refcount_release(&tfile->refcount))
+		ttm_object_file_destroy(tfile);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+			 struct ttm_base_object *base,
+			 bool shareable,
+			 enum ttm_object_type object_type,
+			 void (*rcount_release) (struct ttm_base_object **),
+			 void (*ref_obj_release) (struct ttm_base_object *,
+						  enum ttm_ref_type ref_type))
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	int ret;
+
+	base->shareable = shareable;
+	base->tfile = ttm_object_file_ref(tfile);
+	base->refcount_release = rcount_release;
+	base->ref_obj_release = ref_obj_release;
+	base->object_type = object_type;
+	refcount_init(&base->refcount, 1);
+	rw_init(&tdev->object_lock, "ttmbao");
+	rw_wlock(&tdev->object_lock);
+	ret = drm_ht_just_insert_please(&tdev->object_hash,
+					    &base->hash,
+					    (unsigned long)base, 31, 0, 0);
+	rw_wunlock(&tdev->object_lock);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+out_err1:
+	rw_wlock(&tdev->object_lock);
+	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+	rw_wunlock(&tdev->object_lock);
+out_err0:
+	return ret;
+}
+
+static void ttm_release_base(struct ttm_base_object *base)
+{
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+	rw_wunlock(&tdev->object_lock);
+	/*
+	 * Note: We don't use synchronize_rcu() here because it's far
+	 * too slow. It's up to the user to free the object using
+	 * call_rcu() or ttm_base_object_kfree().
+	 */
+
+	if (base->refcount_release) {
+		ttm_object_file_unref(&base->tfile);
+		base->refcount_release(&base);
+	}
+	rw_wlock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	*p_base = NULL;
+
+	/*
+	 * Need to take the lock here to avoid racing with
+	 * users trying to look up the object.
+	 */
+
+	rw_wlock(&tdev->object_lock);
+	if (refcount_release(&base->refcount))
+		ttm_release_base(base);
+	rw_wunlock(&tdev->object_lock);
+}
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+					       uint32_t key)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct drm_hash_item *hash;
+	int ret;
+
+	rw_rlock(&tdev->object_lock);
+	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+	if (ret == 0) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		refcount_acquire(&base->refcount);
+	}
+	rw_runlock(&tdev->object_lock);
+
+	if (unlikely(ret != 0))
+		return NULL;
+
+	if (tfile != base->tfile && !base->shareable) {
+		printf("[TTM] Attempted access of non-shareable object %p\n",
+		    base);
+		ttm_base_object_unref(&base);
+		return NULL;
+	}
+
+	return base;
+}
+
+MALLOC_DEFINE(M_TTM_OBJ_REF, "ttm_obj_ref", "TTM Ref Objects");
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+		       struct ttm_base_object *base,
+		       enum ttm_ref_type ref_type, bool *existed)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+	int ret = -EINVAL;
+
+	if (existed != NULL)
+		*existed = true;
+
+	while (ret == -EINVAL) {
+		rw_rlock(&tfile->lock);
+		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+		if (ret == 0) {
+			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+			refcount_acquire(&ref->kref);
+			rw_runlock(&tfile->lock);
+			break;
+		}
+
+		rw_runlock(&tfile->lock);
+		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+					   false, false);
+		if (unlikely(ret != 0))
+			return ret;
+		ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
+		if (unlikely(ref == NULL)) {
+			ttm_mem_global_free(mem_glob, sizeof(*ref));
+			return -ENOMEM;
+		}
+
+		ref->hash.key = base->hash.key;
+		ref->obj = base;
+		ref->tfile = tfile;
+		ref->ref_type = ref_type;
+		refcount_init(&ref->kref, 1);
+
+		rw_wlock(&tfile->lock);
+		ret = drm_ht_insert_item(ht, &ref->hash);
+
+		if (ret == 0) {
+			list_add_tail(&ref->head, &tfile->ref_list);
+			refcount_acquire(&base->refcount);
+			rw_wunlock(&tfile->lock);
+			if (existed != NULL)
+				*existed = false;
+			break;
+		}
+
+		rw_wunlock(&tfile->lock);
+		MPASS(ret == -EINVAL);
+
+		ttm_mem_global_free(mem_glob, sizeof(*ref));
+		free(ref, M_TTM_OBJ_REF);
+	}
+
+	return ret;
+}
+
+static void ttm_ref_object_release(struct ttm_ref_object *ref)
+{
+	struct ttm_base_object *base = ref->obj;
+	struct ttm_object_file *tfile = ref->tfile;
+	struct drm_open_hash *ht;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+	ht = &tfile->ref_hash[ref->ref_type];
+	(void)drm_ht_remove_item(ht, &ref->hash);
+	list_del(&ref->head);
+	rw_wunlock(&tfile->lock);
+
+	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+		base->ref_obj_release(base, ref->ref_type);
+
+	ttm_base_object_unref(&ref->obj);
+	ttm_mem_global_free(mem_glob, sizeof(*ref));
+	free(ref, M_TTM_OBJ_REF);
+	rw_wlock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+			      unsigned long key, enum ttm_ref_type ref_type)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	int ret;
+
+	rw_wlock(&tfile->lock);
+	ret = drm_ht_find_item(ht, key, &hash);
+	if (unlikely(ret != 0)) {
+		rw_wunlock(&tfile->lock);
+		return -EINVAL;
+	}
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	if (refcount_release(&ref->kref))
+		ttm_ref_object_release(ref);
+	rw_wunlock(&tfile->lock);
+	return 0;
+}
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+	struct ttm_ref_object *ref;
+	struct list_head *list;
+	unsigned int i;
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	rw_wlock(&tfile->lock);
+
+	/*
+	 * Since we release the lock within the loop, we have to
+	 * restart it from the beginning each time.
+	 */
+
+	while (!list_empty(&tfile->ref_list)) {
+		list = tfile->ref_list.next;
+		ref = list_entry(list, struct ttm_ref_object, head);
+		ttm_ref_object_release(ref);
+	}
+
+	for (i = 0; i < TTM_REF_NUM; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	rw_wunlock(&tfile->lock);
+	ttm_object_file_unref(&tfile);
+}
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+					     unsigned int hash_order)
+{
+	struct ttm_object_file *tfile;
+	unsigned int i;
+	unsigned int j = 0;
+	int ret;
+
+	tfile = malloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK);
+	rw_init(&tfile->lock, "ttmfo");
+	tfile->tdev = tdev;
+	refcount_init(&tfile->refcount, 1);
+	INIT_LIST_HEAD(&tfile->ref_list);
+
+	for (i = 0; i < TTM_REF_NUM; ++i) {
+		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+		if (ret) {
+			j = i;
+			goto out_err;
+		}
+	}
+
+	return tfile;
+out_err:
+	for (i = 0; i < j; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	free(tfile, M_TTM_OBJ_FILE);
+
+	return NULL;
+}
+
+MALLOC_DEFINE(M_TTM_OBJ_DEV, "ttm_obj_dev", "TTM Device Objects");
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+						 *mem_glob,
+						 unsigned int hash_order)
+{
+	struct ttm_object_device *tdev;
+	int ret;
+
+	tdev = malloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK);
+	tdev->mem_glob = mem_glob;
+	rw_init(&tdev->object_lock, "ttmdo");
+	atomic_set(&tdev->object_count, 0);
+	ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+	if (ret == 0)
+		return tdev;
+
+	free(tdev, M_TTM_OBJ_DEV);
+	return NULL;
+}
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+	struct ttm_object_device *tdev = *p_tdev;
+
+	*p_tdev = NULL;
+
+	rw_wlock(&tdev->object_lock);
+	drm_ht_remove(&tdev->object_hash);
+	rw_wunlock(&tdev->object_lock);
+
+	free(tdev, M_TTM_OBJ_DEV);
+}


Property changes on: trunk/sys/dev/drm2/ttm/ttm_object.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
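
The per-file half of ttm_object.c maps onto the open/release path of a device
file. A hedged sketch of the full lifecycle (the hash orders and the hook
placement are illustrative assumptions):

	/* Device attach: one object device per drm device. */
	tdev = ttm_object_device_init(mem_glob, 12);

	/* file_ops::open: one object file per open() of the device. */
	tfile = ttm_object_file_init(tdev, 10);
	if (tfile == NULL)
		return (-ENOMEM);

	/* file_ops::release: drops every ref object this file holds. */
	ttm_object_file_release(&tfile);

	/* Device detach: */
	ttm_object_device_release(&tdev);
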
Added: trunk/sys/dev/drm2/ttm/ttm_object.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_object.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_object.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,272 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_object.h 247835 2013-03-05 09:49:34Z kib $ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <dev/drm2/drm_hashtab.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+	TTM_REF_USAGE,
+	TTM_REF_SYNCCPU_READ,
+	TTM_REF_SYNCCPU_WRITE,
+	TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+	ttm_fence_type,
+	ttm_buffer_type,
+	ttm_lock_type,
+	ttm_driver_type0 = 256,
+	ttm_driver_type1,
+	ttm_driver_type2,
+	ttm_driver_type3,
+	ttm_driver_type4,
+	ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @object_type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+	/* struct rcu_head rhead;XXXKIB */
+	struct drm_hash_item hash;
+	enum ttm_object_type object_type;
+	bool shareable;
+	struct ttm_object_file *tfile;
+	u_int refcount;
+	void (*refcount_release) (struct ttm_base_object **base);
+	void (*ref_obj_release) (struct ttm_base_object *base,
+				 enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+				struct ttm_base_object *base,
+				bool shareable,
+				enum ttm_object_type type,
+				void (*refcount_release) (struct ttm_base_object
+							  **),
+				void (*ref_obj_release) (struct ttm_base_object
+							 *,
+							 enum ttm_ref_type
+							 ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+						      *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be refcounting to prevent object destruction.
+ * When user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+			      struct ttm_base_object *base,
+			      enum ttm_ref_type ref_type, bool *existed);
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+				     unsigned long key,
+				     enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+						    *tdev,
+						    unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+    (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_object.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
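
A sketch of the lookup path an ioctl handler might take with this header:
resolve the user-supplied handle to a base object, take a TTM_REF_USAGE
reference for the calling file, then drop the lookup reference. The
drv_ref_ioctl wrapper and 'handle' argument are illustrative assumptions:

	static int
	drv_ref_ioctl(struct ttm_object_file *tfile, uint32_t handle)
	{
		struct ttm_base_object *base;
		bool existed;
		int ret;

		base = ttm_base_object_lookup(tfile, handle);
		if (base == NULL)
			return (-EINVAL);	/* unknown key or not shareable */
		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
		ttm_base_object_unref(&base);
		return (ret);
	}

The matching drop is ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE),
either explicit or implicit when the file is released.
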
Added: trunk/sys/dev/drm2/ttm/ttm_page_alloc.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_page_alloc.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_page_alloc.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,930 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied at redhat.com>
+ *          Jerome Glisse <jglisse at redhat.com>
+ *          Pauli Nieminen <suokkos at gmail.com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib at FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in-use pages
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_page_alloc.c 285002 2015-07-01 11:28:42Z avg $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+#include <vm/vm_pageout.h>
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(vm_page_t))
+#define SMALL_ALLOCATION		16
+#define FREE_ALL_PAGES			(~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL		1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+	struct mtx		lock;
+	bool			fill_lock;
+	bool			dma32;
+	struct pglist		list;
+	int			ttm_page_alloc_flags;
+	unsigned		npages;
+	char			*name;
+	unsigned long		nfrees;
+	unsigned long		nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are some pages to free.
+ * @small_allocation: Limit, in pages, below which an allocation counts as
+ * small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+	unsigned int kobj_ref;
+	eventhandler_tag lowmem_handler;
+	struct ttm_pool_opts	options;
+
+	union {
+		struct ttm_page_pool	u_pools[NUM_POOLS];
+		struct _utag {
+			struct ttm_page_pool	u_wc_pool;
+			struct ttm_page_pool	u_uc_pool;
+			struct ttm_page_pool	u_wc_pool_dma32;
+			struct ttm_page_pool	u_uc_pool_dma32;
+		} _ut;
+	} _u;
+};
+
+#define	pools _u.u_pools
+#define	wc_pool _u._ut.u_wc_pool
+#define	uc_pool _u._ut.u_uc_pool
+#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
+#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32
+
+MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
+
+static void
+ttm_vm_page_free(vm_page_t m)
+{
+
+	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
+	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
+	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
+	m->flags &= ~PG_FICTITIOUS;
+	m->oflags |= VPO_UNMANAGED;
+	vm_page_unwire(m, 0);
+	vm_page_free(m);
+}
+
+static vm_memattr_t
+ttm_caching_state_to_vm(enum ttm_caching_state cstate)
+{
+
+	switch (cstate) {
+	case tt_uncached:
+		return (VM_MEMATTR_UNCACHEABLE);
+	case tt_wc:
+		return (VM_MEMATTR_WRITE_COMBINING);
+	case tt_cached:
+		return (VM_MEMATTR_WRITE_BACK);
+	}
+	panic("caching state %d\n", cstate);
+}
+
+static vm_page_t
+ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+	int tries;
+
+	for (tries = 0; ; tries++) {
+		p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
+		    PAGE_SIZE, 0, memattr);
+		if (p != NULL || tries > 2)
+			return (p);
+
+		/*
+		 * Before growing the cache see if this is just a normal
+		 * memory shortage.
+		 */
+		VM_WAIT;
+		vm_pageout_grow_cache(tries, 0, 0xffffffff);
+	}
+}
+
+static vm_page_t
+ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+
+	while (1) {
+		p = vm_page_alloc(NULL, 0, req);
+		if (p != NULL)
+			break;
+		VM_WAIT;
+	}
+	pmap_page_set_memattr(p, memattr);
+	return (p);
+}
+
+static vm_page_t
+ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
+{
+	vm_page_t p;
+	vm_memattr_t memattr;
+	int req;
+
+	memattr = ttm_caching_state_to_vm(cstate);
+	req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+	if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
+		req |= VM_ALLOC_ZERO;
+
+	if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
+		p = ttm_vm_page_alloc_dma32(req, memattr);
+	else
+		p = ttm_vm_page_alloc_any(req, memattr);
+
+	if (p != NULL) {
+		p->oflags &= ~VPO_UNMANAGED;
+		p->flags |= PG_FICTITIOUS;
+	}
+	return (p);
+}
+
+static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
+{
+
+	free(m, M_TTM_POOLMGR);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
+		struct attribute *attr, const char *buffer, size_t size)
+{
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
+		struct attribute *attr, char *buffer)
+{
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+#endif
+
+static struct ttm_pool_manager *_manager;
+
+static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
+#endif
+	return 0;
+}
+
+/**
+ * Select the right pool for the requested caching state and ttm flags.
+ */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+		enum ttm_caching_state cstate)
+{
+	int pool_index;
+
+	if (cstate == tt_cached)
+		return NULL;
+
+	if (cstate == tt_wc)
+		pool_index = 0x0;
+	else
+		pool_index = 0x1;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		pool_index |= 0x2;
+
+	return &_manager->pools[pool_index];
+}
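
For example, tt_wc with TTM_PAGE_FLAG_DMA32 set selects index 0x2
(wc_pool_dma32) and tt_uncached without the flag selects 0x1 (uc_pool),
matching the union layout in struct ttm_pool_manager above; tt_cached pages
bypass the pools entirely.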
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(vm_page_t *pages, unsigned npages)
+{
+	unsigned i;
+
+	/* Our VM handles vm memattr automatically on the page free. */
+	if (set_pages_array_wb(pages, npages))
+		printf("[TTM] Failed to set %d pages to wb!\n", npages);
+	for (i = 0; i < npages; ++i)
+		ttm_vm_page_free(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+		unsigned freed_pages)
+{
+	pool->npages -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: Pool to free the pages from.
+ * @nr_free: Number of pages to free; FREE_ALL_PAGES drains the pool.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	vm_page_t p, p1;
+	vm_page_t *pages_to_free;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+	unsigned i;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+
+	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
+	    M_TEMP, M_WAITOK | M_ZERO);
+
+restart:
+	mtx_lock(&pool->lock);
+
+	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		pages_to_free[freed_pages++] = p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove range of pages from the pool */
+			for (i = 0; i < freed_pages; i++)
+				TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			mtx_unlock(&pool->lock);
+
+			ttm_pages_put(pages_to_free, freed_pages);
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because
+			 * following context is inside spinlock while we are
+			 * outside here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		for (i = 0; i < freed_pages; i++)
+			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);
+
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	mtx_unlock(&pool->lock);
+
+	if (freed_pages)
+		ttm_pages_put(pages_to_free, freed_pages);
+out:
+	free(pages_to_free, M_TEMP);
+	return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools. */
+static int ttm_pool_get_num_unused_pages(void)
+{
+	unsigned i;
+	int total = 0;
+	for (i = 0; i < NUM_POOLS; ++i)
+		total += _manager->pools[i].npages;
+
+	return total;
+}
+
+/**
+ * Callback for the VM lowmem event to request that the pools reduce the
+ * number of pages held.
+ */
+static int ttm_pool_mm_shrink(void *arg)
+{
+	static unsigned int start_pool = 0;
+	unsigned i;
+	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
+	struct ttm_page_pool *pool;
+	int shrink_pages = 100; /* XXXKIB */
+
+	pool_offset = pool_offset % NUM_POOLS;
+	/* select start pool in round robin fashion */
+	for (i = 0; i < NUM_POOLS; ++i) {
+		unsigned nr_free = shrink_pages;
+		if (shrink_pages == 0)
+			break;
+		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		shrink_pages = ttm_page_pool_free(pool, nr_free);
+	}
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+
+	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
+	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+
+	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
+}
+
+static int ttm_set_pages_caching(vm_page_t *pages,
+		enum ttm_caching_state cstate, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	switch (cstate) {
+	case tt_uncached:
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			printf("[TTM] Failed to set %d pages to uc!\n", cpages);
+		break;
+	case tt_wc:
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			printf("[TTM] Failed to set %d pages to wc!\n", cpages);
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/**
+ * Free the pages that failed to change caching state. Any pages that have
+ * already changed their caching state are put back into the pool.
+ */
+static void ttm_handle_caching_state_failure(struct pglist *pages,
+		int ttm_flags, enum ttm_caching_state cstate,
+		vm_page_t *failed_pages, unsigned cpages)
+{
+	unsigned i;
+	/* Failed pages have to be freed */
+	for (i = 0; i < cpages; ++i) {
+		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
+		ttm_vm_page_free(failed_pages[i]);
+	}
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages array.
+ */
+static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	vm_page_t *caching_array;
+	vm_page_t p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
+
+	/* allocate array for page caching change */
+	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
+	    M_WAITOK | M_ZERO);
+
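+	/* Pages are collected in caching_array and have their caching
+	 * attributes changed in batches of up to max_cpages. */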
+	for (i = 0, cpages = 0; i < count; ++i) {
+		p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
+		if (!p) {
+			printf("[TTM] Unable to get page %u\n", i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
+				if (r)
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_HIGHMEM /* KIB: nop */
+		/* gfp flags of a highmem page should never be dma32, so
+		 * we should be fine in such a case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+
+				r = ttm_set_pages_caching(caching_array,
+						cstate, cpages);
+				if (r) {
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+
+		TAILQ_INSERT_HEAD(pages, p, plinks.q);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(caching_array, cstate, cpages);
+		if (r)
+			ttm_handle_caching_state_failure(pages,
+					ttm_flags, cstate,
+					caching_array, cpages);
+	}
+out:
+	free(caching_array, M_TEMP);
+
+	return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number of
+ * pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	vm_page_t p;
+	int r;
+	unsigned cpages = 0;
+	/**
+	 * Only allow one pool fill operation at a time.
+	 * If the pool doesn't have enough pages for the allocation, new
+	 * pages are allocated from outside of the pool.
+	 */
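+	/* fill_lock is a plain flag rather than a lock; it is only read
+	 * and written with pool->lock held, which makes this safe. */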
+	if (pool->fill_lock)
+		return;
+
+	pool->fill_lock = true;
+
+	/* If the allocation request is small and there are not enough
+	 * pages in the pool, fill the pool up first. */
+	if (count < _manager->options.small
+		&& count > pool->npages) {
+		struct pglist new_pages;
+		unsigned alloc_size = _manager->options.alloc_size;
+
+		/**
+		 * Can't change page caching if in irqsave context. We have to
+		 * drop the pool->lock.
+		 */
+		mtx_unlock(&pool->lock);
+
+		TAILQ_INIT(&new_pages);
+		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
+		    ttm_flags, cstate, alloc_size);
+		mtx_lock(&pool->lock);
+
+		if (!r) {
+			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
+			++pool->nrefills;
+			pool->npages += alloc_size;
+		} else {
+			printf("[TTM] Failed to fill pool (%p)\n", pool);
+			/* If we have any pages left put them back into the
+			 * pool; count the new pages, not those already there. */
+			TAILQ_FOREACH(p, &new_pages, plinks.q) {
+				++cpages;
+			}
+			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
+			pool->npages += cpages;
+		}
+
+	}
+	pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' pages from the pool and put them on the return list.
+ *
+ * @return count of pages still required to fulfill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+					struct pglist *pages,
+					int ttm_flags,
+					enum ttm_caching_state cstate,
+					unsigned count)
+{
+	vm_page_t p;
+	unsigned i;
+
+	mtx_lock(&pool->lock);
+	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
+
+	if (count >= pool->npages) {
+		/* take all pages from the pool */
+		TAILQ_CONCAT(pages, &pool->list, plinks.q);
+		count -= pool->npages;
+		pool->npages = 0;
+		goto out;
+	}
+	for (i = 0; i < count; i++) {
+		p = TAILQ_FIRST(&pool->list);
+		TAILQ_REMOVE(&pool->list, p, plinks.q);
+		TAILQ_INSERT_TAIL(pages, p, plinks.q);
+	}
+	pool->npages -= count;
+	count = 0;
+out:
+	mtx_unlock(&pool->lock);
+	return count;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
+			  enum ttm_caching_state cstate)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	unsigned i;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				ttm_vm_page_free(pages[i]);
+				pages[i] = NULL;
+			}
+		}
+		return;
+	}
+
+	mtx_lock(&pool->lock);
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
+	/* Check that we don't go over the pool limit */
+	npages = 0;
+	if (pool->npages > _manager->options.max_size) {
+		npages = pool->npages - _manager->options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC pages
+		 * to reduce calls to set_memory_wb */
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
+	}
+	mtx_unlock(&pool->lock);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
+}
+
+/*
+ * On success the pages list will hold count correctly
+ * cached pages.
+ */
+static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
+			 enum ttm_caching_state cstate)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct pglist plist;
+	vm_page_t p = NULL;
+	int gfp_flags;
+	unsigned count;
+	int r;
+
+	/* No pool for cached pages */
+	if (pool == NULL) {
+		for (r = 0; r < npages; ++r) {
+			p = ttm_vm_page_alloc(flags, cstate);
+			if (!p) {
+				printf("[TTM] Unable to allocate page\n");
+				return -ENOMEM;
+			}
+			pages[r] = p;
+		}
+		return 0;
+	}
+
+	/* combine the zero flag with the pool flags */
+	gfp_flags = flags | pool->ttm_page_alloc_flags;
+
+	/* First we take pages from the pool */
+	TAILQ_INIT(&plist);
+	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	count = 0;
+	TAILQ_FOREACH(p, &plist, plinks.q) {
+		pages[count++] = p;
+	}
+
+	/* clear the pages coming from the pool if requested */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		TAILQ_FOREACH(p, &plist, plinks.q) {
+			pmap_zero_page(p);
+		}
+	}
+
+	/* If the pool didn't have enough pages, allocate new ones. */
+	if (npages > 0) {
+		/* ttm_alloc_new_pages doesn't reference the pool so we can
+		 * run multiple requests in parallel.
+		 */
+		TAILQ_INIT(&plist);
+		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
+		    npages);
+		TAILQ_FOREACH(p, &plist, plinks.q) {
+			pages[count++] = p;
+		}
+		if (r) {
+			/* If there are any pages in the list put them back
+			 * into the pool. */
+			printf("[TTM] Failed to allocate extra pages for large request\n");
+			ttm_put_pages(pages, count, flags, cstate);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+				      char *name)
+{
+	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
+	pool->fill_lock = false;
+	TAILQ_INIT(&pool->list);
+	pool->npages = pool->nfrees = 0;
+	pool->ttm_page_alloc_flags = flags;
+	pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+
+	if (_manager != NULL)
+		printf("[TTM] manager != NULL\n");
+	printf("[TTM] Initializing pool allocator\n");
+
+	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);
+
+	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
+	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
+	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+	    TTM_PAGE_FLAG_DMA32, "wc dma");
+	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+	    TTM_PAGE_FLAG_DMA32, "uc dma");
+
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	refcount_init(&_manager->kobj_ref, 1);
+	ttm_pool_mm_shrink_init(_manager);
+
+	return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+	int i;
+
+	printf("[TTM] Finalizing pool allocator\n");
+	ttm_pool_mm_shrink_fini(_manager);
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+
+	if (refcount_release(&_manager->kobj_ref))
+		ttm_pool_kobj_release(_manager);
+	_manager = NULL;
+}
+
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	unsigned i;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
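+	/* Pages are allocated and charged against the global memory
+	 * accounting one at a time so a failure can be unwound cleanly. */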
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_get_pages(&ttm->pages[i], 1,
+				    ttm->page_flags,
+				    ttm->caching_state);
+		if (ret != 0) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		if (ttm->pages[i]) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1,
+				      ttm->page_flags,
+				      ttm->caching_state);
+		}
+	}
+	ttm->state = tt_unpopulated;
+}
+
+#if 0
+/* XXXKIB sysctl */
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct ttm_page_pool *p;
+	unsigned i;
+	char *h[] = {"pool", "refills", "pages freed", "size"};
+	if (!_manager) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%6s %12s %13s %8s\n",
+			h[0], h[1], h[2], h[3]);
+	for (i = 0; i < NUM_POOLS; ++i) {
+		p = &_manager->pools[i];
+
+		seq_printf(m, "%6s %12ld %13ld %8d\n",
+				p->name, p->nrefills,
+				p->nfrees, p->npages);
+	}
+	return 0;
+}
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_page_alloc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_page_alloc.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_page_alloc.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_page_alloc.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,104 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied at redhat.com>
+ *          Jerome Glisse <jglisse at redhat.com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_page_alloc.h 247835 2013-03-05 09:49:34Z kib $ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt whose backing pages are to be freed.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+/* XXXKIB
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+*/
+
+#ifdef CONFIG_SWIOTLB
+/**
+ * Initialize pool allocator.
+ */
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
+/**
+ * Free pool allocator.
+ */
+void ttm_dma_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+					  unsigned max_pages)
+{
+	return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+/* XXXKIB
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	return 0;
+}
+*/
+#endif
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_page_alloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_page_alloc_dma.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_page_alloc_dma.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,1135 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ *   the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ *   when freed).
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_page_alloc_dma.c 247835 2013-03-05 09:49:34Z kib $");
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION		4
+#define FREE_ALL_PAGES			(~0U)
+/* pool type flags */
+#define IS_UNDEFINED			(0)
+#define IS_WC				(1<<1)
+#define IS_UC				(1<<2)
+#define IS_CACHED			(1<<3)
+#define IS_DMA32			(1<<4)
+
+enum pool_type {
+	POOL_IS_UNDEFINED,
+	POOL_IS_WC = IS_WC,
+	POOL_IS_UC = IS_UC,
+	POOL_IS_CACHED = IS_CACHED,
+	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ *  - generic (not restricted to DMA32):
+ *      - write combined, uncached, cached.
+ *  - dma32 (up to 2^32 - so up to 4GB):
+ *      - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must
+ * be used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important:
+ *   it matches the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ *   Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+	struct list_head pools; /* The 'struct device->dma_pools link */
+	enum pool_type type;
+	spinlock_t lock;
+	struct list_head inuse_list;
+	struct list_head free_list;
+	struct device *dev;
+	unsigned size;
+	unsigned npages_free;
+	unsigned npages_in_use;
+	unsigned long nfrees; /* Stats when shrunk. */
+	unsigned long nrefills; /* Stats when grown. */
+	gfp_t gfp_flags;
+	char name[13]; /* "cached dma32" */
+	char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ *   via the DMA API, it will be -1.
+ */
+struct dma_page {
+	struct list_head page_list;
+	void *vaddr;
+	struct page *p;
+	dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is in the sysfs store. They won't have immediate
+ * effect anyway so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _manager->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+	struct list_head pools;
+	struct device *dev;
+	struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+	struct mutex		lock;
+	struct list_head	pools;
+	struct ttm_pool_opts	options;
+	unsigned		npools;
+	struct shrinker		mm_shrink;
+	struct kobject		kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+			     char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+				 struct page **pages, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	if (pool->type & IS_UC) {
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			pr_err("%s: Failed to set %d pages to uc!\n",
+			       pool->dev_name, cpages);
+	}
+	if (pool->type & IS_WC) {
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			pr_err("%s: Failed to set %d pages to wc!\n",
+			       pool->dev_name, cpages);
+	}
+	return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+	dma_addr_t dma = d_page->dma;
+	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+	kfree(d_page);
+	d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+	struct dma_page *d_page;
+
+	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+	if (!d_page)
+		return NULL;
+
+	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+					   &d_page->dma,
+					   pool->gfp_flags);
+	if (d_page->vaddr)
+		d_page->p = virt_to_page(d_page->vaddr);
+	else {
+		kfree(d_page);
+		d_page = NULL;
+	}
+	return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+	enum pool_type type = IS_UNDEFINED;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		type |= IS_DMA32;
+	if (cstate == tt_cached)
+		type |= IS_CACHED;
+	else if (cstate == tt_uncached)
+		type |= IS_UC;
+	else
+		type |= IS_WC;
+
+	return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+					unsigned freed_pages)
+{
+	pool->npages_free -= freed_pages;
+	pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+			      struct page *pages[], unsigned npages)
+{
+	struct dma_page *d_page, *tmp;
+
+	/* Don't set WB on WB page pool. */
+	if (npages && !(pool->type & IS_CACHED) &&
+	    set_pages_array_wb(pages, npages))
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, npages);
+
+	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+		list_del(&d_page->page_list);
+		__ttm_dma_free_page(pool, d_page);
+	}
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+	/* Don't set WB on WB page pool. */
+	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, 1);
+
+	list_del(&d_page->page_list);
+	__ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: the pool to free pages from
+ * @nr_free: number of pages to free; pass FREE_ALL_PAGES to drain the pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct dma_page *dma_p, *tmp;
+	struct page **pages_to_free;
+	struct list_head d_pages;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+	if (nr_free > 1) {
+		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+			 pool->dev_name, pool->name, current->pid,
+			 npages_to_free, nr_free);
+	}
+#endif
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+
+	if (!pages_to_free) {
+		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		       pool->dev_name);
+		return 0;
+	}
+	INIT_LIST_HEAD(&d_pages);
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
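+	/* If the lock is dropped below we jump back to restart and
+	 * rescan, since the free list may have changed meanwhile. */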
+	/* We're picking the oldest ones off the list */
+	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+					 page_list) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		/* Move the dma_page from one list to another. */
+		list_move(&dma_p->page_list, &d_pages);
+
+		pages_to_free[freed_pages++] = dma_p->p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+					  freed_pages);
+
+			INIT_LIST_HEAD(&d_pages);
+
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* more pages remain to be freed so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because the
+			 * code that follows runs inside the spinlock, while
+			 * we have dropped it here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+	struct device_pools *p;
+	struct dma_pool *pool;
+
+	if (!dev)
+		return;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry_reverse(p, &_manager->pools, pools) {
+		if (p->dev != dev)
+			continue;
+		pool = p->pool;
+		if (pool->type != type)
+			continue;
+
+		list_del(&p->pools);
+		kfree(p);
+		_manager->npools--;
+		break;
+	}
+	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+		if (pool->type != type)
+			continue;
+		/* Takes a spinlock. */
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+		/* This code path is called after _all_ references to the
+		 * struct device have been dropped - so nobody should be
+		 * touching it. In case somebody is trying to _add_ we are
+		 * guarded by the mutex. */
+		list_del(&pool->pools);
+		kfree(pool);
+		break;
+	}
+	mutex_unlock(&_manager->lock);
+}
+
+/*
+ * When the 'struct device' is freed this destructor is run,
+ * although the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+	struct dma_pool *pool = *(struct dma_pool **)res;
+
+	if (pool)
+		ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+	return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+					  enum pool_type type)
+{
+	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+	struct device_pools *sec_pool = NULL;
+	struct dma_pool *pool = NULL, **ptr;
+	unsigned i;
+	int ret = -ENODEV;
+	char *p;
+
+	if (!dev)
+		return NULL;
+
+	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	ret = -ENOMEM;
+
+	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+			    dev_to_node(dev));
+	if (!pool)
+		goto err_mem;
+
+	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+				dev_to_node(dev));
+	if (!sec_pool)
+		goto err_mem;
+
+	INIT_LIST_HEAD(&sec_pool->pools);
+	sec_pool->dev = dev;
+	sec_pool->pool =  pool;
+
+	INIT_LIST_HEAD(&pool->free_list);
+	INIT_LIST_HEAD(&pool->inuse_list);
+	INIT_LIST_HEAD(&pool->pools);
+	spin_lock_init(&pool->lock);
+	pool->dev = dev;
+	pool->npages_free = pool->npages_in_use = 0;
+	pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->size = PAGE_SIZE;
+	pool->type = type;
+	pool->nrefills = 0;
+	p = pool->name;
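+	/* The pool name is built from the matching type bits,
+	 * e.g. "wc dma32". */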
+	for (i = 0; i < 5; i++) {
+		if (type & t[i]) {
+			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+				      "%s", n[i]);
+		}
+	}
+	*p = 0;
+	/* We copy the name for pr_ calls because when dma_pool_destroy is
+	 * called the kobj->name has already been deallocated. */
+	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+		 dev_driver_string(dev), dev_name(dev));
+	mutex_lock(&_manager->lock);
+	/* You can get the dma_pool from either the global: */
+	list_add(&sec_pool->pools, &_manager->pools);
+	_manager->npools++;
+	/* or from 'struct device': */
+	list_add(&pool->pools, &dev->dma_pools);
+	mutex_unlock(&_manager->lock);
+
+	*ptr = pool;
+	devres_add(dev, ptr);
+
+	return pool;
+err_mem:
+	devres_free(ptr);
+	kfree(sec_pool);
+	kfree(pool);
+	return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+					  enum pool_type type)
+{
+	struct dma_pool *pool, *tmp, *found = NULL;
+
+	if (type == IS_UNDEFINED)
+		return found;
+
+	/* NB: We iterate on the 'struct dev' which has no spinlock, but
+	 * it does have a kref which we have taken. The kref is taken during
+	 * graphic driver loading - in the drm_pci_init it calls either
+	 * pci_dev_get or pci_register_driver which both end up taking a kref
+	 * on 'struct device'.
+	 *
+	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
+	 * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
+	 * thing is at that point in time there are no pages associated with the
+	 * driver so this function will not be called.
+	 */
+	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+		if (pool->type != type)
+			continue;
+		found = pool;
+		break;
+	}
+	return found;
+}
+
+/*
+ * Free the pages that failed to change caching state. Pages that have
+ * already changed their caching state are put back into the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+						 struct list_head *d_pages,
+						 struct page **failed_pages,
+						 unsigned cpages)
+{
+	struct dma_page *d_page, *tmp;
+	struct page *p;
+	unsigned i = 0;
+
+	p = failed_pages[0];
+	if (!p)
+		return;
+	/* Find the failed page. */
+	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+		if (d_page->p != p)
+			continue;
+		/* .. and then progress over the full list. */
+		list_del(&d_page->page_list);
+		__ttm_dma_free_page(pool, d_page);
+		if (++i < cpages)
+			p = failed_pages[i];
+		else
+			break;
+	}
+
+}
+
+/*
+ * Allocate 'count' pages and put them on the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+					struct list_head *d_pages,
+					unsigned count)
+{
+	struct page **caching_array;
+	struct dma_page *dma_p;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		pr_err("%s: Unable to allocate table for new pages\n",
+		       pool->dev_name);
+		return -ENOMEM;
+	}
+
+	if (count > 1) {
+		pr_debug("%s: (%s:%d) Getting %d pages\n",
+			 pool->dev_name, pool->name, current->pid, count);
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		dma_p = __ttm_dma_alloc_page(pool);
+		if (!dma_p) {
+			pr_err("%s: Unable to get page %u\n",
+			       pool->dev_name, i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(pool, caching_array,
+							  cpages);
+				if (r)
+					ttm_dma_handle_caching_state_failure(
+						pool, d_pages, caching_array,
+						cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+		p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of a highmem page should never be dma32, so
+		 * we should be fine in such a case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+				/* Note: Cannot hold the spinlock */
+				r = ttm_set_pages_caching(pool, caching_array,
+						 cpages);
+				if (r) {
+					ttm_dma_handle_caching_state_failure(
+						pool, d_pages, caching_array,
+						cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+		list_add(&dma_p->page_list, d_pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(pool, caching_array, cpages);
+		if (r)
+			ttm_dma_handle_caching_state_failure(pool, d_pages,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+	return r;
+}
+
+/*
+ * @return number of pages available on the pool's free list (0 if none).
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+					 unsigned long *irq_flags)
+{
+	unsigned count = _manager->options.small;
+	int r = pool->npages_free;
+
+	if (count > pool->npages_free) {
+		struct list_head d_pages;
+
+		INIT_LIST_HEAD(&d_pages);
+
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		/* Returns 0 on success, or a negative error code if not
+		 * all pages could be allocated. */
+		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+		if (!r) {
+			/* Add the fresh pages to the free list. */
+			list_splice(&d_pages, &pool->free_list);
+			++pool->nrefills;
+			pool->npages_free += count;
+			r = count;
+		} else {
+			struct dma_page *d_page;
+			unsigned cpages = 0;
+
+			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+			       pool->dev_name, pool->name, r);
+
+			list_for_each_entry(d_page, &d_pages, page_list) {
+				cpages++;
+			}
+			list_splice_tail(&d_pages, &pool->free_list);
+			pool->npages_free += cpages;
+			r = cpages;
+		}
+	}
+	return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ * The populate list is actually a stack (not that is matters as TTM
+ * allocates one page at a time.
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+				  struct ttm_dma_tt *ttm_dma,
+				  unsigned index)
+{
+	struct dma_page *d_page;
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	unsigned long irq_flags;
+	int count, r = -ENOMEM;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+	if (count) {
+		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+		ttm->pages[index] = d_page->p;
+		ttm_dma->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+		r = 0;
+		pool->npages_in_use += 1;
+		pool->npages_free -= 1;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return r;
+}
+
+/*
+ * On success the pages list will hold count correctly cached pages. On
+ * failure a negative value (-ENOMEM, etc) is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct dma_pool *pool;
+	enum pool_type type;
+	unsigned i;
+	gfp_t gfp_flags;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+		gfp_flags = GFP_USER | GFP_DMA32;
+	else
+		gfp_flags = GFP_HIGHUSER;
+	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	pool = ttm_dma_find_pool(dev, type);
+	if (!pool) {
+		pool = ttm_dma_pool_init(dev, gfp_flags, type);
+		if (IS_ERR_OR_NULL(pool)) {
+			return -ENOMEM;
+		}
+	}
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (ret != 0) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+	struct device_pools *p;
+	unsigned total = 0;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools)
+		total += p->pool->npages_free;
+	mutex_unlock(&_manager->lock);
+	return total;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	struct dma_pool *pool;
+	struct dma_page *d_page, *next;
+	enum pool_type type;
+	bool is_cached = false;
+	unsigned count = 0, i, npages = 0;
+	unsigned long irq_flags;
+
+	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	pool = ttm_dma_find_pool(dev, type);
+	if (!pool)
+		return;
+
+	is_cached = (ttm_dma_find_pool(pool->dev,
+		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
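+	/* Cached pages are not kept for reuse: they are freed outright
+	 * further down, while wc/uc pages go back on the free list. */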
+
+	/* make sure the pages array matches the list and count the pages */
+	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+		ttm->pages[count] = d_page->p;
+		count++;
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	pool->npages_in_use -= count;
+	if (is_cached) {
+		pool->nfrees += count;
+	} else {
+		pool->npages_free += count;
+		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		npages = count;
+		if (pool->npages_free > _manager->options.max_size) {
+			npages = pool->npages_free - _manager->options.max_size;
+			/* free at least NUM_PAGES_TO_ALLOC pages
+			 * to reduce calls to set_memory_wb */
+			if (npages < NUM_PAGES_TO_ALLOC)
+				npages = NUM_PAGES_TO_ALLOC;
+		}
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (is_cached) {
+		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p);
+			ttm_dma_page_put(pool, d_page);
+		}
+	} else {
+		for (i = 0; i < count; i++) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+		}
+	}
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->pages[i] = NULL;
+		ttm_dma->dma_address[i] = 0;
+	}
+
+	/* shrink the pool if necessary (only for !is_cached pools) */
+	if (npages)
+		ttm_dma_page_pool_free(pool, npages);
+	ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request that the pools reduce the number of pages
+ * held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+				  struct shrink_control *sc)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned idx = 0;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned shrink_pages = sc->nr_to_scan;
+	struct device_pools *p;
+
+	if (list_empty(&_manager->pools))
+		return 0;
+
+	mutex_lock(&_manager->lock);
+	pool_offset = pool_offset % _manager->npools;
+	list_for_each_entry(p, &_manager->pools, pools) {
+		unsigned nr_free;
+
+		if (!p->dev)
+			continue;
+		if (shrink_pages == 0)
+			break;
+		/* Do it in round-robin fashion. */
+		if (++idx < pool_offset)
+			continue;
+		nr_free = shrink_pages;
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+			 p->pool->dev_name, p->pool->name, current->pid,
+			 nr_free, shrink_pages);
+	}
+	mutex_unlock(&_manager->lock);
+	/* return estimated number of unused pages in pool */
+	return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret = -ENOMEM;
+
+	WARN_ON(_manager);
+
+	pr_info("Initializing DMA pool allocator\n");
+
+	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+	if (!_manager)
+		goto err;
+
+	mutex_init(&_manager->lock);
+	INIT_LIST_HEAD(&_manager->pools);
+
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	/* This takes care of auto-freeing the _manager */
+	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+				   &glob->kobj, "dma_pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager->kobj);
+		goto err;
+	}
+	ttm_dma_pool_mm_shrink_init(_manager);
+	return 0;
+err:
+	return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+	struct device_pools *p, *t;
+
+	pr_info("Finalizing DMA pool allocator\n");
+	ttm_dma_pool_mm_shrink_fini(_manager);
+
+	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+			current->pid);
+		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+			ttm_dma_pool_match, p->pool));
+		ttm_dma_free_pool(p->dev, p->pool->type);
+	}
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct device_pools *p;
+	struct dma_pool *pool = NULL;
+	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+		     "name", "virt", "busaddr"};
+
+	if (!_manager) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+		   h[0], h[1], h[2], h[3], h[4], h[5]);
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools) {
+		struct device *dev = p->dev;
+		if (!dev)
+			continue;
+		pool = p->pool;
+		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+				pool->name, pool->nrefills,
+				pool->nfrees, pool->npages_in_use,
+				pool->npages_free,
+				pool->dev_name);
+	}
+	mutex_unlock(&_manager->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);


Property changes on: trunk/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_placement.h
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_placement.h	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_placement.h	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,94 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_placement.h 247835 2013-03-05 09:49:34Z kib $ */
+
+#ifndef _TTM_PLACEMENT_H_
+#define _TTM_PLACEMENT_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM           0
+#define TTM_PL_TT               1
+#define TTM_PL_VRAM             2
+#define TTM_PL_PRIV0            3
+#define TTM_PL_PRIV1            4
+#define TTM_PL_PRIV2            5
+#define TTM_PL_PRIV3            6
+#define TTM_PL_PRIV4            7
+#define TTM_PL_PRIV5            8
+#define TTM_PL_SWAPPED          15
+
+#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM         0x0000FFFF
+
+/*
+ * Other flags that affect data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_SHARED means that another application may
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
+ */
+
+#define TTM_PL_FLAG_CACHED      (1 << 16)
+#define TTM_PL_FLAG_UNCACHED    (1 << 17)
+#define TTM_PL_FLAG_WC          (1 << 18)
+#define TTM_PL_FLAG_SHARED      (1 << 20)
+#define TTM_PL_FLAG_NO_EVICT    (1 << 21)
+
+#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
+				 TTM_PL_FLAG_UNCACHED | \
+				 TTM_PL_FLAG_WC)
+
+#define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
+
+/*
+ * Access flags to be used for CPU- and GPU- mappings.
+ * The idea is that the TTM synchronization mechanism will
+ * allow concurrent READ access and exclusive write access.
+ * Currently GPU- and CPU accesses are exclusive.
+ */
+
+#define TTM_ACCESS_READ         (1 << 0)
+#define TTM_ACCESS_WRITE        (1 << 1)
+
+#endif


Property changes on: trunk/sys/dev/drm2/ttm/ttm_placement.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/drm2/ttm/ttm_tt.c
===================================================================
--- trunk/sys/dev/drm2/ttm/ttm_tt.c	                        (rev 0)
+++ trunk/sys/dev/drm2/ttm/ttm_tt.c	2018-05-28 00:14:02 UTC (rev 10117)
@@ -0,0 +1,370 @@
+/* $MidnightBSD$ */
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib at FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/ttm/ttm_tt.c 254875 2013-08-25 15:12:26Z dumbbell $");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+	ttm->pages = malloc(ttm->num_pages * sizeof(void *),
+	    M_TTM_PD, M_WAITOK | M_ZERO);
+}
+
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+	ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
+	    M_TTM_PD, M_WAITOK | M_ZERO);
+	ttm->dma_address = malloc(ttm->ttm.num_pages *
+	    sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
+}
+
+#if defined(__i386__) || defined(__amd64__)
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
+{
+
+	/* XXXKIB our VM does not need this. */
+#if 0
+	if (c_old != tt_cached) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
+	}
+#endif
+
+	if (c_new == tt_wc)
+		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
+	else if (c_new == tt_uncached)
+		pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);
+
+	return (0);
+}
+#else
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Change caching policy for the linear kernel map
+ * for a range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+			      enum ttm_caching_state c_state)
+{
+	int i, j;
+	vm_page_t cur_page;
+	int ret;
+
+	if (ttm->caching_state == c_state)
+		return 0;
+
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
+	}
+
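+	/* When leaving the cached state, flush CPU caches before the
+	 * page attributes are changed. */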
+	if (ttm->caching_state == tt_cached)
+		drm_clflush_pages(ttm->pages, ttm->num_pages);
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages[i];
+		if (likely(cur_page != NULL)) {
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
+			if (unlikely(ret != 0))
+				goto out_err;
+		}
+	}
+
+	ttm->caching_state = c_state;
+
+	return 0;
+
+out_err:
+	for (j = 0; j < i; ++j) {
+		cur_page = ttm->pages[j];
+		if (cur_page != NULL) {
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
+						      ttm->caching_state);
+		}
+	}
+
+	return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+	enum ttm_caching_state state;
+
+	if (placement & TTM_PL_FLAG_WC)
+		state = tt_wc;
+	else if (placement & TTM_PL_FLAG_UNCACHED)
+		state = tt_uncached;
+	else
+		state = tt_cached;
+
+	return ttm_tt_set_caching(ttm, state);
+}
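+
+/*
+ * Illustrative sketch (not part of this commit, never compiled): how a
+ * caller might request write-combined caching through the placement API.
+ */
+#ifdef TTM_TT_USAGE_EXAMPLE
+static int
+example_set_wc(struct ttm_tt *ttm)
+{
+
+	/* TTM_PL_FLAG_WC maps to tt_wc, updating every backing page. */
+	return (ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_WC));
+}
+#endif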
+
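+/**
+ * Unbind and unpopulate a ttm, release its non-persistent swap storage,
+ * and hand the structure back to the driver backend for destruction.
+ */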
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+	if (unlikely(ttm == NULL))
+		return;
+
+	if (ttm->state == tt_bound) {
+		ttm_tt_unbind(ttm);
+	}
+
+	if (likely(ttm->pages != NULL)) {
+		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	}
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
+	    ttm->swap_storage)
+		vm_object_deallocate(ttm->swap_storage);
+
+	ttm->swap_storage = NULL;
+	ttm->func->destroy(ttm);
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags,
+		vm_page_t dummy_read_page)
+{
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+	ttm->swap_storage = NULL;
+
+	ttm_tt_alloc_page_directory(ttm);
+	if (!ttm->pages) {
+		ttm_tt_destroy(ttm);
+		printf("[TTM] Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
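+
+/*
+ * Illustrative sketch (not part of this commit, never compiled): a driver
+ * typically wraps ttm_tt_init() in its ttm_tt_create backend, installing
+ * its own struct ttm_backend_func first.  All "example_*" names below are
+ * hypothetical.
+ */
+#ifdef TTM_TT_USAGE_EXAMPLE
+static struct ttm_backend_func example_backend_func;	/* hypothetical */
+
+static struct ttm_tt *
+example_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+    uint32_t page_flags, vm_page_t dummy_read_page)
+{
+	struct ttm_tt *ttm;
+
+	ttm = malloc(sizeof(*ttm), M_TTM_PD, M_WAITOK | M_ZERO);
+	ttm->func = &example_backend_func;
+	/*
+	 * With M_WAITOK the page directory allocation cannot fail, but
+	 * the error convention is checked for completeness.
+	 */
+	if (ttm_tt_init(ttm, bdev, size, page_flags, dummy_read_page) != 0)
+		return (NULL);
+	return (ttm);
+}
+#endif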
+
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+	free(ttm->pages, M_TTM_PD);
+	ttm->pages = NULL;
+}
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags,
+		vm_page_t dummy_read_page)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+	ttm->swap_storage = NULL;
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (!ttm->pages || !ttm_dma->dma_address) {
+		ttm_tt_destroy(ttm);
+		printf("[TTM] Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	free(ttm->pages, M_TTM_PD);
+	ttm->pages = NULL;
+	free(ttm_dma->dma_address, M_TTM_PD);
+	ttm_dma->dma_address = NULL;
+}
+
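+/**
+ * Unbind the ttm from the aperture via the driver backend, if it is
+ * currently bound.
+ */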
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+	int ret;
+
+	if (ttm->state == tt_bound) {
+		ret = ttm->func->unbind(ttm);
+		MPASS(ret == 0);
+		ttm->state = tt_unbound;
+	}
+}
+
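+/**
+ * Populate the ttm through the driver backend and bind it to the
+ * aperture location described by bo_mem.
+ */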
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	int ret = 0;
+
+	if (!ttm)
+		return -EINVAL;
+
+	if (ttm->state == tt_bound)
+		return 0;
+
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+	if (ret)
+		return ret;
+
+	ret = ttm->func->bind(ttm, bo_mem);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ttm->state = tt_bound;
+
+	return 0;
+}
+
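+/**
+ * Copy the contents of the swap object back into the pages backing the
+ * ttm, clear TTM_PAGE_FLAG_SWAPPED and release non-persistent swap
+ * storage.
+ */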
+int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+	vm_object_t obj;
+	vm_page_t from_page, to_page;
+	int i, ret, rv;
+
+	obj = ttm->swap_storage;
+
+	VM_OBJECT_WLOCK(obj);
+	vm_object_pip_add(obj, 1);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
+		if (from_page->valid != VM_PAGE_BITS_ALL) {
+			if (vm_pager_has_page(obj, i, NULL, NULL)) {
+				rv = vm_pager_get_pages(obj, &from_page, 1, 0);
+				if (rv != VM_PAGER_OK) {
+					vm_page_lock(from_page);
+					vm_page_free(from_page);
+					vm_page_unlock(from_page);
+					ret = -EIO;
+					goto err_ret;
+				}
+			} else
+				vm_page_zero_invalid(from_page, TRUE);
+		}
+		vm_page_xunbusy(from_page);
+		to_page = ttm->pages[i];
+		if (unlikely(to_page == NULL)) {
+			ret = -ENOMEM;
+			goto err_ret;
+		}
+		pmap_copy_page(from_page, to_page);
+	}
+	vm_object_pip_wakeup(obj);
+	VM_OBJECT_WUNLOCK(obj);
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
+		vm_object_deallocate(obj);
+	ttm->swap_storage = NULL;
+	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+	return (0);
+
+err_ret:
+	vm_object_pip_wakeup(obj);
+	VM_OBJECT_WUNLOCK(obj);
+	return (ret);
+}
+
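+/**
+ * Copy the ttm's pages out to a swap object (an anonymous one is
+ * allocated unless persistent_swap_storage is supplied), unpopulate the
+ * ttm and mark it TTM_PAGE_FLAG_SWAPPED.
+ */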
+int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
+{
+	vm_object_t obj;
+	vm_page_t from_page, to_page;
+	int i;
+
+	MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
+	MPASS(ttm->caching_state == tt_cached);
+
+	if (persistent_swap_storage == NULL) {
+		obj = vm_pager_allocate(OBJT_SWAP, NULL,
+		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
+		    curthread->td_ucred);
+		if (obj == NULL) {
+			printf("[TTM] Failed allocating swap storage\n");
+			return (-ENOMEM);
+		}
+	} else
+		obj = persistent_swap_storage;
+
+	VM_OBJECT_WLOCK(obj);
+	vm_object_pip_add(obj, 1);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = ttm->pages[i];
+		if (unlikely(from_page == NULL))
+			continue;
+		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
+		pmap_copy_page(from_page, to_page);
+		to_page->valid = VM_PAGE_BITS_ALL;
+		vm_page_dirty(to_page);
+		vm_page_xunbusy(to_page);
+	}
+	vm_object_pip_wakeup(obj);
+	VM_OBJECT_WUNLOCK(obj);
+
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	ttm->swap_storage = obj;
+	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+	if (persistent_swap_storage != NULL)
+		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+	return (0);
+}
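+
+/*
+ * Illustrative sketch (not part of this commit, never compiled): under
+ * memory pressure a ttm is swapped out to an anonymous swap object; the
+ * driver's ttm_tt_populate backend later reallocates the pages and calls
+ * ttm_tt_swapin() to restore their contents.
+ */
+#ifdef TTM_TT_USAGE_EXAMPLE
+static void
+example_swapout(struct ttm_tt *ttm)
+{
+
+	/* NULL asks for a transient swap object to hold the contents. */
+	(void)ttm_tt_swapout(ttm, NULL);
+}
+#endif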


Property changes on: trunk/sys/dev/drm2/ttm/ttm_tt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property

